Mirror of https://github.com/minio/minio.git (synced 2026-02-08 11:50:15 -05:00)

Compare commits (761 Commits): RELEASE.20... ... key-versio...
| SHA1 |
|---|
| 01cb705c36 |
| 9ea14c88d8 |
| 30a1261c22 |
| 0e017ab071 |
| f14198e3dc |
| 93c389dbc9 |
| ddd9a84cd7 |
| b7540169a2 |
| f01374950f |
| 18aceae620 |
| 427826abc5 |
| 2780778c10 |
| 2d8ba15b9e |
| bd6dd55e7f |
| 0d7408fc99 |
| 864f80e226 |
| 0379d6a37f |
| 43aa8e4259 |
| e2ed696619 |
| fb3f67a597 |
| 7ee75368e0 |
| 1d6478b8ae |
| 0581001b6f |
| 479303e7e9 |
| 89aec6804b |
| eb33bc6bf5 |
| 3310f740f0 |
| 4595293ca0 |
| 02a67cbd2a |
| 2b34e5b9ae |
| a6258668a6 |
| d0cada583f |
| 0bd8f06b62 |
| 6640be3bed |
| eafeb27e90 |
| f2c9eb0f79 |
| f2619d1f62 |
| 8c70975283 |
| 01447d2438 |
| 07f31e574c |
| 8d223e07fb |
| 4041a8727c |
| 5f243fde9a |
| a0e3f1cc18 |
| b1bc641105 |
| e0c8738230 |
| 9aa24b1920 |
| 53d40e41bc |
| e88d494775 |
| b67f0cf721 |
| 46922c71b7 |
| 670edb4fcf |
| 42d4ab2a0a |
| 5e2eb372bf |
| cccb37a5ac |
| dbf31af6cb |
| 93e40c3ab4 |
| 8aa0e9ff7c |
| bbd6f18afb |
| 2a3acc4f24 |
| 11507d46da |
| f9c62dea55 |
| 8c2c92f7af |
| 4c71f1b4ec |
| 6cd8a372cb |
| 953a3e2bbd |
| 7cc0c69228 |
| f129fd48f2 |
| bc4008ced4 |
| 526053339b |
| 62a35b3e77 |
| 39df134204 |
| ad4cbce22d |
| 90f5e1e5f6 |
| aeabac9181 |
| b312f13473 |
| 727a803bc0 |
| d0e443172d |
| 60446e7ac0 |
| b8544266e5 |
| 437dd4e32a |
| 447054b841 |
| 9bf43e54cd |
| 60f8423157 |
| 4355ea3c3f |
| e30f1ad7bd |
| f00c8c4cce |
| 703f51164d |
| b8dde47d4e |
| 7fa3e39f85 |
| 4df7a3aa8f |
| 64a8f2e554 |
| f4fd4ea66d |
| 712fe1a8df |
| 4a319bedc9 |
| bdb3db6dad |
| abb385af41 |
| 4ee62606e4 |
| 079d64c801 |
| dcc000ae2c |
| c5d19ecebb |
| ed29a525b3 |
| 020c46cd3c |
| 827004cd6d |
| 779ec8f0d4 |
| 3d0f513ee2 |
| 4b6eadbd80 |
| 6f47414b23 |
| 224a27992a |
| 232544e1d8 |
| dbcb71828d |
| b9196757fd |
| b4ac53d157 |
| 4952bdb770 |
| 00b2ef2932 |
| 4536ecfaa4 |
| 43a7402968 |
| 330dca9a35 |
| ddd137d317 |
| 06ddd8770e |
| 16f8cf1c52 |
| 01e520eb23 |
| 02f770a0c0 |
| 2f4c79bc0f |
| 969ee7dfbe |
| 5f0b086b05 |
| 68b004a48f |
| 54ecce66f0 |
| 2b008c598b |
| 86d02b17cf |
| c1a95a70ac |
| f246c9053f |
| 9cdd204ae4 |
| 7b3eb9f7f8 |
| d56ef8dbe1 |
| a248ed5ff5 |
| 5bb31e4883 |
| aff2a76d80 |
| eddbe6bca2 |
| 734d1e320a |
| b8dab7b1a9 |
| abd6bf060d |
| f0d4ef604c |
| 2712f75762 |
| 4c46668da8 |
| 02e93fd6ba |
| 366876e98b |
| d202fdd022 |
| c07e5b49d4 |
| 9a39f8ad4d |
| 7e0c1c9413 |
| 485d833cd7 |
| e8a476ef5a |
| 267f0ecea2 |
| 4ee3434854 |
| 0e9854372e |
| b5177993b3 |
| 55f5c18fd9 |
| 8ce101c174 |
| 4972735507 |
| e6ca6de194 |
| cefc43e4da |
| 25e34fda5f |
| 4208d7af5a |
| 8d42f37e4b |
| 7cb4b5c636 |
| 1615920f48 |
| 7ee42b3ff5 |
| a6f1e727fb |
| c1fc7779ca |
| b3ab7546ee |
| ad88a81e3d |
| c4239ced22 |
| f85c28e960 |
| f7e176d4ca |
| 72a0d14195 |
| 6abe4128d7 |
| ed5ed7e490 |
| 51410c9023 |
| 96ca402dcd |
| 3da7c9cce3 |
| a14e19ec54 |
| e091dde041 |
| d10bb7e1b6 |
| 7ebceacac6 |
| 1593cb615d |
| 86a41d1631 |
| d4157b819c |
| e0aceca1b7 |
| 87804624fe |
| e029f8a9d7 |
| 1bc6681176 |
| 28322124e2 |
| cbfe9de3e7 |
| dc86b8d9d4 |
| ba70118e2b |
| cb1d3e50f7 |
| ded0b19d97 |
| d0bb3dd136 |
| ab7714b01e |
| e5b18df6db |
| 0abfd1bcb1 |
| 6186d11761 |
| e8b457e8a6 |
| afea40cc0f |
| 402b798f1b |
| 4759532e90 |
| 7f1e1713ab |
| b2c5819dbc |
| 2b0156b1fc |
| f6f0807c86 |
| 0c53d86017 |
| 6a6ee46d76 |
| 974cbb3bb7 |
| 03e996320e |
| 78fcb76294 |
| 3d152015eb |
| ade8925155 |
| 05a6c170bf |
| e1c2344591 |
| 48a591e9b4 |
| fa5d9c02ef |
| 5bd27346ac |
| 3c82cf9327 |
| 70d40083e9 |
| 8a30967542 |
| 229f04ab79 |
| 1123dc3676 |
| 5bf41aff17 |
| e47d787adb |
| 398ffb1136 |
| 5862582cd7 |
| e36b1146d6 |
| c28a4beeb7 |
| 15ab0808b3 |
| 3bae73fb42 |
| bc527eceda |
| b963f36e1e |
| cdd7512a2e |
| a6d5287310 |
| 22822f4151 |
| 0b7aa6af87 |
| 8c9ab85cfa |
| b1c849bedc |
| fb24bcfee0 |
| 8268c12cfb |
| 3f39da48ea |
| 9d5cdaa2e3 |
| 84e122c5c3 |
| 261111e728 |
| 0f1e8db4c5 |
| 64e803b136 |
| a0f9e9f661 |
| b6b7cddc9c |
| 241be9709c |
| 85f08d7752 |
| 6be88a2b99 |
| 060276932d |
| 6224849fd1 |
| 9b79eec29e |
| c2e318dd40 |
| 69258d5945 |
| d7ef6315ae |
| aaf4fb1184 |
| f05641c3c6 |
| 7a34c88d73 |
| 6c746843ac |
| bb07df7e7b |
| 1cb824039e |
| 504e52b45e |
| 38c0840834 |
| c65e67c357 |
| fb2360ff88 |
| 1a2de1bdde |
| 993b97f1db |
| 1c4d28d7af |
| af55f37b27 |
| 2d67c26794 |
| 006cacfefb |
| c28f09d4a7 |
| 73992d2b9f |
| a8f143298f |
| 2d44c161c7 |
| 9511056f44 |
| fb4ad000b6 |
| a8ff12bc72 |
| 1e1bd3afd9 |
| 7b239ae154 |
| 8a11282522 |
| 85c3db3a93 |
| 37383ecd09 |
| 6378ca10a4 |
| 9e81ccd2d9 |
| 72cff79c8a |
| a5702f978e |
| 4687c4616f |
| d8dfb57d5c |
| b07c58aa05 |
| cc0c41d216 |
| f1302c40fe |
| 3b1aa40372 |
| d96798ae7b |
| b508264ac4 |
| db78431b1d |
| 743ddb196a |
| 3ffeabdfcb |
| 51b1f41518 |
| e7a56f35b9 |
| 516af01a12 |
| acdb355070 |
| 37c02a5f7b |
| 04be352ae9 |
| 53eb7656de |
| d8f0e0ea6e |
| 2e0fd2cba9 |
| 909b169593 |
| 4e67a4027e |
| 49055658a9 |
| 89c58ce87d |
| 14876a4df1 |
| 2681219039 |
| 5da7f0100a |
| dea9abed29 |
| e3eb5c1328 |
| fb9364f1fb |
| 6efb56851c |
| db2c7ed1d1 |
| 74c047cb03 |
| 50a5ad48fc |
| 292fccff6e |
| a9dc061d84 |
| 01a8c09920 |
| 4c8562bcec |
| f13c04629b |
| 80ff907d08 |
| 673df6d517 |
| 2d40433bc1 |
| 3bc39db34e |
| a17f14f73a |
| 6651c655cb |
| 3ae104edae |
| c87a489514 |
| a60267501d |
| 641a56da0d |
| 59788e25c7 |
| a16193bb50 |
| 132e7413ba |
| 1966668066 |
| 064f36ca5a |
| 15b609ecea |
| 4a1edfd9aa |
| b7f319b62a |
| 33c101544d |
| 21cf29330e |
| 3b21bb5be8 |
| 6fe2b3f901 |
| b368d4cc13 |
| 0680af7414 |
| 91805bcab6 |
| c0e2886e37 |
| 4f5dded4d4 |
| b3a94c4e85 |
| 8e618d45fc |
| 3ef59d2821 |
| 23db4958f5 |
| d9ee668b6d |
| 2e5d792f0c |
| b276651eaa |
| 3535197f99 |
| 95f076340a |
| 698bb93a46 |
| 2584430141 |
| ded373e600 |
| e8c54c3d6c |
| f944a42886 |
| eff0ea43aa |
| 3b602bb532 |
| 459985f0fa |
| d0080046c2 |
| 7fcb428622 |
| 4ea6f94ed8 |
| 83adc2eebf |
| 989c318a28 |
| ef802f2b2c |
| f5d2fbc84c |
| e139673969 |
| a8c6465f22 |
| 27538e2d22 |
| 6c6f0987dc |
| 5f64658faa |
| ce183cb2b4 |
| b3bac73c0f |
| e726d8ff0f |
| f4230777b3 |
| 380233d646 |
| d592bc0c1c |
| 0d0b0aa599 |
| b433bf14ba |
| cf371da346 |
| 107d951893 |
| 22c53b1c70 |
| 88926ad8e9 |
| 32d04091a2 |
| b6d4a77b94 |
| be84a4fd68 |
| 2ec1f404ac |
| 2040559f71 |
| ca0ce4c6ef |
| 757cf413cb |
| b35acb3dbc |
| e404abf103 |
| f7ff19cb18 |
| f736702da8 |
| 91faaa1387 |
| 68a9f521d5 |
| f365a98029 |
| 47bbc272df |
| aebac90013 |
| 7ca4ba77c4 |
| 13512170b5 |
| 154fcaeb56 |
| 722118386d |
| 709612cb37 |
| b35d083872 |
| 5e7b243bde |
| f8f9fc77ac |
| 499531f0b5 |
| 3c2141513f |
| 602f6a9ad0 |
| 22c5a5b91b |
| 41f508765d |
| 7dccd1f589 |
| 55ff598b23 |
| a22ce4550c |
| 168ae81b1f |
| 5f6a25cdd0 |
| be97ae4c5d |
| 4d7d008741 |
| 2d7a3d1516 |
| dfab400d43 |
| 70078eab10 |
| 3415c4dd1e |
| e200808ab7 |
| fae563b85d |
| 3e6dc02f8f |
| 95e4cbbfde |
| 2825294b7b |
| bce93b5cfa |
| 7a4b250c8b |
| e5335450a4 |
| a6ffdf1dd4 |
| 69e41f87ef |
| ee48f9f206 |
| 9ba39d7fad |
| d2fb371f80 |
| 2f9018f03b |
| eb990f64a9 |
| bbb64eaade |
| 7bd1d899bc |
| c91d1ec2e3 |
| c50b64027d |
| 20960b6a2d |
| 3bd3470d0b |
| ba39ed9af7 |
| 62e6dc950d |
| 5a5046ce45 |
| ad04afe381 |
| ba9f0f2480 |
| d06b63d056 |
| 7ce28c3b1d |
| e3ac4035b9 |
| d21b6daa49 |
| 76ebb16688 |
| 55aa431578 |
| 614981e566 |
| b8b956a05d |
| d2eed44c78 |
| 789cbc6fb2 |
| 0662c90b5c |
| a2cab02554 |
| 6c7a21df6b |
| f933b0b708 |
| 9f305273a7 |
| cbd9efcb43 |
| 29a25a538f |
| 2dd8faaedc |
| f00187033d |
| c5141d65ac |
| 069c4015cd |
| 2f6e03fb60 |
| 0fbb945e13 |
| b94dd835c9 |
| 44fc707423 |
| 5aaef9790f |
| 7edc352d23 |
| 850a84b08a |
| 4148754ce0 |
| 2107722829 |
| d326ba52e9 |
| 91e1487de4 |
| 5ffb2a9605 |
| 17fe91d6d1 |
| 90a9f2dd70 |
| d5e48cfd65 |
| d274566463 |
| 39ac720826 |
| 21b6204692 |
| d98faeb26a |
| 0a63dc199c |
| 3ba857dfa1 |
| a8554c4022 |
| ba54b39c02 |
| 2a75225569 |
| e72429c79c |
| c5b3f5553f |
| d3ae0aaad3 |
| d67bccf861 |
| 1277ad69a6 |
| 8f93e81afb |
| 4af31e654b |
| aad50579ba |
| 38d059b0ae |
| bd4eeb4522 |
| 03e3493288 |
| 64baedf5a4 |
| 2f64d5f77e |
| f79a4ef4d0 |
| 2d53854b19 |
| e5c83535af |
| c904ef966e |
| 8f266e0772 |
| e0fe7cc391 |
| 9d20dec56a |
| 597a785253 |
| 7d75b1e758 |
| 5f78691fcf |
| a591e06ae5 |
| 443c93c634 |
| 5659cddc84 |
| 2a03a34bde |
| 1654a9b7e6 |
| 673a521711 |
| 2e23076688 |
| b92ac55250 |
| 7981509cc8 |
| 6d5bc045bc |
| d38e020b29 |
| 7d29030292 |
| 7c7650b7c3 |
| ca80eced24 |
| d0e0b81d8e |
| 391baa1c9a |
| ae14681c3e |
| 4d698841f4 |
| 9906b3ade9 |
| bf1769d3e0 |
| 63e1ad9f29 |
| 2c7bcee53f |
| 1fd90c93ff |
| e947a844c9 |
| 4e2d39293a |
| 1228d6bf1a |
| fc4561c64c |
| 3b7747b42b |
| e432e79324 |
| 08d74819b6 |
| aa3fde1784 |
| 0b3eb7f218 |
| 69c9496c71 |
| b792b36495 |
| d3db7d31a3 |
| c05ca63158 |
| 6d3e0c7db6 |
| 0e59e50b39 |
| d4b391de1b |
| de4d3dac00 |
| 534e7161df |
| 9b219cd646 |
| 3bab4822f3 |
| 3c5f2d8916 |
| 5808190398 |
| b2a82248b1 |
| 4e5fcca8b9 |
| c36eaedb93 |
| 7752b03add |
| 01bfc78535 |
| 074d70112d |
| e8d14c0d90 |
| 60d7e8143a |
| 9667a170de |
| abae30f9e1 |
| f9311bc9d1 |
| b598402738 |
| bd026b913f |
| 72ff69d9bb |
| f30417d9a8 |
| 47a4ad3cd7 |
| 2f7a10ab31 |
| b534dc69ab |
| 7b7d2ea7d4 |
| e00de1c302 |
| 3549e583a6 |
| f5e3eedf34 |
| 519dbfebf6 |
| 9a267f9270 |
| 67bd71b7a5 |
| ec49fff583 |
| 8b660e18f2 |
| 981497799a |
| b9bdc17465 |
| b413ff9fdb |
| 6a15580817 |
| 39633a5581 |
| 1e83f15e2f |
| 888d2bb1d8 |
| 847ee5ac45 |
| 9a9a49aa84 |
| a03ca80269 |
| 523bd769f1 |
| 8ff70ea5a9 |
| da3e7747ca |
| 4afb59e63f |
| 1526e7ece3 |
| 6c07bfee8a |
| 446c760820 |
| 04f92f1291 |
| 4a60a7794d |
| e5b16adb1c |
| 402a3ac719 |
| f3d61c51fc |
| 0cde17ae5d |
| 8c1bba681b |
| dbfb5e797b |
| 08ff702434 |
| 0e2148264a |
| a75f42344b |
| 7926401cbd |
| 8161411c5d |
| f64dea2aac |
| 6579304d8c |
| 6bb10a81a6 |
| 3cf8a7c888 |
| 2e38bb5175 |
| a372c6a377 |
| 93b2f8a0c5 |
| 1a6568a25d |
| 9e95703efc |
| d8e05aca81 |
| 410a1ac040 |
| 4caa3422bd |
| a658b976f5 |
| 135874ebdc |
| f4f1c42cba |
| e7aa26dc29 |
| c54ffde568 |
| 9a3c992d7a |
| 0c855638de |
| 943d815783 |
| 4c0acba62d |
| 62c3cdee75 |
| 3212d0c8cd |
| 1d03bea965 |
| fbfeb59658 |
| 701da1282a |
| df93ff92ba |
| 77d5331e85 |
| 14cdadfb56 |
| f3a52cc195 |
| 7640cd24c9 |
| f7b665347e |
| 9693c382a8 |
| ee1047bd52 |
| 5ea5ab162b |
| b5a09ff96b |
| 95c65f4e8f |
| 6bfff7532e |
| 1aa8896ad6 |
| 3e32ceb39f |
| ca1350b092 |
| 9205434ed3 |
| cd50e9b4bc |
| ec816f3840 |
| 5f774951b1 |
| 2ca9befd2a |
| 72f5cb577e |
| 928c0181bf |
| 03767d26da |
| 108e6f92d4 |
| d653a59fc0 |
| 01bfdf949a |
| 98f7821eb3 |
| 2d3898e0d5 |
| ae46ce9937 |
| dfc112c06b |
| ca5fab8656 |
| 6df76ca73c |
| f65dd3e5a2 |
| a8d601b64a |
| 73b4794cf7 |
| e2709ea129 |
| 740ec80819 |
| d95e054282 |
| 7c1f9667d1 |
| 9246990496 |
| 0cf3d93360 |
| cb06aee5ac |
| 1c70e9ed1b |
| f3d6a2dd37 |
| d1c58fc2eb |
| b8f05b1471 |
| e7baf78ee8 |
| 87299eba10 |
| d3a07c29ba |
| 8d39b715dc |
| 7e3166475d |
| 5206c0e883 |
| 41ec038523 |
| 08d3d06a06 |
| 074febd9e1 |
| aa8d25797b |
| 8d7d4adb91 |
| ffa91f9794 |
| 0c31e61343 |
| 9b926f7dbe |
| 35d8728990 |
| f7ed9a75ba |
| 9496c17e13 |
| ed64e91f06 |
| a481825ae1 |
| 7bb0f32332 |
| c6f8dc431e |
| 78f177b8ee |
| 787c44c39d |
| f06fee0364 |
| c957e0d426 |
| 04101d472f |
| 51fc145161 |
| 9d63bb1b41 |
| 8ff2a7a2b9 |
| 91f91d8f47 |
| a207bd6790 |
| 96d226c0b1 |
| a86d98826d |
| 1bb670ecba |
| c9e9a8e2b9 |
| 272367ccd2 |
| 95bf4a57b6 |
| 2228eb61cb |
| 5f07eb2d17 |
| d96d696841 |
| e18c0ab9bf |
| faeb2b7e79 |
| 97ce11cb6b |
| d7daae4762 |
| 3d86ae12bc |
| ba46ee5dfa |
| 912bbb2f1d |
| 4f660a8eb7 |
| ae4fb1b72e |
| b435806d91 |
| 06929258bc |
11 .github/workflows/go-cross.yml (vendored)

@@ -3,12 +3,11 @@ name: Crosscompile
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
-      - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
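The `concurrency` block that recurs across these workflows is what the "previous jobs for the PR are canceled" comment refers to: keying the group on the workflow name plus the PR head ref makes a new push supersede any in-flight run for the same PR. A minimal sketch of the pattern (the workflow name and job here are illustrative, not from this repo):

```yaml
name: Example CI

on:
  pull_request:
    branches:
      - master

# Runs sharing the same workflow + head ref land in one group;
# a new push cancels whichever run in the group is still in progress.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
```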
5 .github/workflows/go-fips.yml (vendored)

@@ -3,8 +3,7 @@ name: FIPS Build Test
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
5 .github/workflows/go-healing.yml (vendored)

@@ -3,8 +3,7 @@ name: Healing Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
18 .github/workflows/go-lint.yml (vendored)

@@ -3,12 +3,11 @@ name: Linters and Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
@@ -21,23 +20,14 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
-        os: [ubuntu-latest, windows-latest]
+        go-version: [1.24.x]
+        os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
-      - name: Build on ${{ matrix.os }}
-        if: matrix.os == 'windows-latest'
-        env:
-          CGO_ENABLED: 0
-          GO111MODULE: on
-        run: |
-          netsh int ipv4 set dynamicport tcp start=60000 num=61000
-          go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
-          go test -v --timeout 50m ./...
       - name: Build on ${{ matrix.os }}
         if: matrix.os == 'ubuntu-latest'
         env:
39 .github/workflows/go-resiliency.yml (vendored, new file)

@@ -0,0 +1,39 @@
+name: Resiliency Functional Tests
+
+on:
+  pull_request:
+    branches:
+      - master
+
+# This ensures that previous jobs for the PR are canceled when the PR is
+# updated.
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Go ${{ matrix.go-version }} on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        go-version: [1.24.x]
+        os: [ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Build on ${{ matrix.os }}
+        if: matrix.os == 'ubuntu-latest'
+        env:
+          CGO_ENABLED: 0
+          GO111MODULE: on
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-resiliency
6 .github/workflows/go.yml (vendored)

@@ -3,8 +3,7 @@ name: Functional Tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4
@@ -40,3 +39,4 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make verify
+          make test-timeout
5 .github/workflows/helm-lint.yml (vendored)

@@ -3,8 +3,7 @@ name: Helm Chart linting
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -23,7 +22,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Install Helm
-        uses: azure/setup-helm@v3
+        uses: azure/setup-helm@v4
 
       - name: Run helm lint
         run: |
45 .github/workflows/iam-integrations.yaml (vendored)

@@ -3,8 +3,7 @@ name: IAM integration
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -62,7 +61,7 @@ jobs:
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
      # the tests.
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         ldap: ["", "localhost:389"]
         etcd: ["", "http://localhost:2379"]
         openid: ["", "http://127.0.0.1:5556/dex"]
@@ -112,6 +111,12 @@ jobs:
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           go run docs/iam/access-manager-plugin.go &
           make test-iam
+      - name: Test MinIO Old Version data to IAM import current version
+        if: matrix.ldap == 'ldaphost:389'
+        env:
+          _MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
+        run: |
+          make test-iam-ldap-upgrade-import
       - name: Test LDAP for automatic site replication
         if: matrix.ldap == 'localhost:389'
         run: |
@@ -120,3 +125,37 @@ jobs:
         if: matrix.openid == 'http://127.0.0.1:5556/dex'
         run: |
           make test-site-replication-oidc
+  iam-import-with-missing-entities:
+    name: Test IAM import in new cluster with missing entities
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster there are missing groups etc
+        run: |
+          make test-iam-import-with-missing-entities
+  iam-import-with-openid:
+    name: Test IAM import in new cluster with openid configurations
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          check-latest: true
+      - name: Checkout minio-iam-testing
+        uses: actions/checkout@v4
+        with:
+          repository: minio/minio-iam-testing
+          path: minio-iam-testing
+      - name: Test import of IAM artifacts when in fresh cluster with openid configurations
+        run: |
+          make test-iam-import-with-openid
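As the in-diff comment notes, this workflow's matrix uses empty strings as "service disabled" markers: each matrix leg then gates its steps on a non-empty value. A minimal sketch of that toggle pattern (the job and step names are illustrative; `_MINIO_LDAP_TEST_SERVER` and the make target are taken from the diff above):

```yaml
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # "" means the LDAP server is not enabled for this matrix leg
        ldap: ["", "localhost:389"]
    steps:
      - name: Run LDAP-specific tests
        # Runs only in the leg where an LDAP endpoint is configured
        if: matrix.ldap == 'localhost:389'
        env:
          _MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
        run: make test-iam
```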
8 .github/workflows/mint.yml (vendored)

@@ -4,7 +4,6 @@ on:
   pull_request:
     branches:
       - master
-      - next
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -30,7 +29,7 @@ jobs:
       - name: setup-go-step
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.x
+          go-version: 1.24.x
 
       - name: github sha short
         id: vars
@@ -56,6 +55,11 @@ jobs:
         run: |
           ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
 
+      # FIXME: re-enable this when we have a valid way to add deadlines for PUT()s (internode CreateFile)
+      # - name: resiliency
+      #   run: |
+      #     ${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
+
       - name: The job must cleanup
         if: ${{ always() }}
         run: |
78 .github/workflows/mint/minio-resiliency.yaml (vendored, new file)

@@ -0,0 +1,78 @@
+version: '3.7'
+
+# Settings and configurations that are common for all containers
+x-minio-common: &minio-common
+  image: quay.io/minio/minio:${JOB_NAME}
+  command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
+  expose:
+    - "9000"
+    - "9001"
+  environment:
+    MINIO_CI_CD: "on"
+    MINIO_ROOT_USER: "minio"
+    MINIO_ROOT_PASSWORD: "minio123"
+    MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
+    MINIO_DRIVE_MAX_TIMEOUT: "5s"
+  healthcheck:
+    test: ["CMD", "mc", "ready", "local"]
+    interval: 5s
+    timeout: 5s
+    retries: 5
+
+# starts 4 docker containers running minio server instances.
+# using nginx reverse proxy, load balancing, you can access
+# it through port 9000.
+services:
+  minio1:
+    <<: *minio-common
+    hostname: minio1
+    volumes:
+      - rdata1-1:/rdata1
+      - rdata1-2:/rdata2
+
+  minio2:
+    <<: *minio-common
+    hostname: minio2
+    volumes:
+      - rdata2-1:/rdata1
+      - rdata2-2:/rdata2
+
+  minio3:
+    <<: *minio-common
+    hostname: minio3
+    volumes:
+      - rdata3-1:/rdata1
+      - rdata3-2:/rdata2
+
+  minio4:
+    <<: *minio-common
+    hostname: minio4
+    volumes:
+      - rdata4-1:/rdata1
+      - rdata4-2:/rdata2
+
+  nginx:
+    image: nginx:1.19.2-alpine
+    hostname: nginx
+    volumes:
+      - ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
+    ports:
+      - "9000:9000"
+      - "9001:9001"
+    depends_on:
+      - minio1
+      - minio2
+      - minio3
+      - minio4
+
+## By default this config uses default local driver,
+## For custom volumes replace with volume driver configuration.
+volumes:
+  rdata1-1:
+  rdata1-2:
+  rdata2-1:
+  rdata2-2:
+  rdata3-1:
+  rdata3-2:
+  rdata4-1:
+  rdata4-2:
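This compose file leans on YAML anchors: `x-minio-common: &minio-common` defines one shared service body (the `x-` prefix keeps compose from treating it as a service of its own), and each `minio1`..`minio4` service pulls it in with the `<<: *minio-common` merge key, overriding only `hostname` and `volumes`. A stripped-down sketch of the same mechanism (service and volume names here are illustrative):

```yaml
version: '3.7'

# Shared definition; not a service because of the x- prefix.
x-common: &common
  image: quay.io/minio/minio:latest
  expose:
    - "9000"

services:
  node1:
    <<: *common        # merge the shared keys...
    hostname: node1    # ...then override per-node values
    volumes:
      - data1:/data
  node2:
    <<: *common
    hostname: node2
    volumes:
      - data2:/data

volumes:
  data1:
  data2:
```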
7 .github/workflows/mint/nginx-4-node.conf (vendored)

@@ -23,10 +23,9 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
16 .github/workflows/mint/nginx-8-node.conf (vendored)

@@ -23,14 +23,14 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
-        server minio5:9000;
-        server minio6:9000;
-        server minio7:9000;
-        server minio8:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
+        server minio5:9000 max_fails=1 fail_timeout=10s;
+        server minio6:9000 max_fails=1 fail_timeout=10s;
+        server minio7:9000 max_fails=1 fail_timeout=10s;
+        server minio8:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
8 .github/workflows/mint/nginx.conf (vendored)

@@ -23,10 +23,10 @@ http {
     # include /etc/nginx/conf.d/*.conf;
 
     upstream minio {
-        server minio1:9000;
-        server minio2:9000;
-        server minio3:9000;
-        server minio4:9000;
+        server minio1:9000 max_fails=1 fail_timeout=10s;
+        server minio2:9000 max_fails=1 fail_timeout=10s;
+        server minio3:9000 max_fails=1 fail_timeout=10s;
+        server minio4:9000 max_fails=1 fail_timeout=10s;
     }
 
     upstream console {
56 .github/workflows/multipart/migrate.sh (vendored)

@@ -24,11 +24,6 @@ if [ ! -f ./mc ]; then
	chmod +x mc
 fi
 
-(
-	cd ./docs/debugging/s3-check-md5
-	go install -v
-)
-
 export RELEASE=RELEASE.2023-08-29T23-07-35Z
 
 docker-compose -f docker-compose-site1.yaml up -d
@@ -48,10 +43,10 @@ sleep 30s
 
 sleep 5
 
-s3-check-md5 -h
+./s3-check-md5 -h
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
	echo "failed with multipart on site1 uploads"
@@ -67,8 +62,8 @@ fi
 
 sleep 5
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 ## we do not need to fail here, since we are going to test
 ## upgrading to master, healing and being able to recover
@@ -96,8 +91,8 @@ for i in $(seq 1 10); do
	./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
 done
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
	echo "failed with multipart on site1 uploads"
@@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
	exit 1
 fi
 
+# Add user group test
+./mc admin user add site1 site-replication-issue-user site-replication-issue-password
+./mc admin group add site1 site-replication-issue-group site-replication-issue-user
+
+max_wait_attempts=30
+wait_interval=5
+
+attempt=1
+while true; do
+	diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
+
+	if [[ $? -eq 0 ]]; then
+		echo "Outputs are consistent."
+		break
+	fi
+
+	remaining_attempts=$((max_wait_attempts - attempt))
+	if ((attempt >= max_wait_attempts)); then
+		echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
+		exit 1
+	else
+		echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
+		sleep $wait_interval
+	fi
+
+	((attempt++))
+done
+
+status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
+
+if [[ $status == "enabled" ]]; then
+	echo "Success"
+else
+	echo "Expected status: enabled, actual status: $status"
+	exit 1
+fi
+
 cleanup
 
 ## change working directory
24 .github/workflows/replication.yaml (vendored)

@@ -3,8 +3,7 @@ name: MinIO advanced tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -22,7 +21,7 @@ jobs:
 
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
 
     steps:
       - uses: actions/checkout@v4
@@ -36,6 +35,19 @@ jobs:
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-decom
 
+      - name: Test ILM
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-ilm
+          make test-ilm-transition
+
+      - name: Test PBAC
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-pbac
+
       - name: Test Config File
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
@@ -59,3 +71,9 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-versioning
+
+      - name: Test Multipart upload with failures
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-multipart
5 .github/workflows/root-disable.yml (vendored)

@@ -3,8 +3,7 @@ name: Root lockdown tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
 
     steps:
20 .github/workflows/run-mint.sh (vendored)

@@ -15,8 +15,11 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 ## change working directory
 cd .github/workflows/mint
 
+## always pull latest
+docker pull docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml up -d
-sleep 30s
+sleep 1m
 
 docker system prune -f || true
 docker volume prune -f || true
@@ -26,6 +29,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
 
+# Pause one node, to check that all S3 calls work while one node goes wrong
+[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
+
 docker run --rm --net=mint_default \
	--name="mint-${MODE}-${JOB_NAME}" \
	-e SERVER_ENDPOINT="nginx:9000" \
@@ -35,6 +41,18 @@ docker run --rm --net=mint_default \
	-e MINT_MODE="${MINT_MODE}" \
	docker.io/minio/mint:edge
 
+# FIXME: enable this after fixing aws-sdk-java-v2 tests
+# # unpause the node, to check that all S3 calls work while one node goes wrong
+# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
+# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
+#	--name="mint-${MODE}-${JOB_NAME}" \
+#	-e SERVER_ENDPOINT="nginx:9000" \
+#	-e ACCESS_KEY="${ACCESS_KEY}" \
+#	-e SECRET_KEY="${SECRET_KEY}" \
+#	-e ENABLE_HTTPS=0 \
+#	-e MINT_MODE="${MINT_MODE}" \
+#	docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml down || true
 sleep 10s
3 .github/workflows/shfmt.yml (vendored)

@@ -3,8 +3,7 @@ name: Shell formatting checks
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 permissions:
   contents: read
2 .github/workflows/typos.yml (vendored)

@@ -1,5 +1,5 @@
 ---
-name: Test GitHub Action
+name: Spelling
 on: [pull_request]
 
 jobs:
5 .github/workflows/upgrade-ci-cd.yaml (vendored)

@@ -3,8 +3,7 @@ name: Upgrade old version tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
 
     steps:
6 .github/workflows/vulncheck.yml (vendored)

@@ -21,11 +21,11 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.21.8
-          check-latest: true
+          go-version: 1.24.x
+          cached: false
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
       - name: Run govulncheck
-        run: govulncheck ./...
+        run: govulncheck -show verbose ./...
         shell: bash
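Taken together, these changes amount to a job of roughly this shape: pin a current Go toolchain, install govulncheck from golang.org/x/vuln, and run it with verbose output. A minimal standalone sketch (the workflow and job names are illustrative, not from this repo):

```yaml
name: VulnCheck

on: [pull_request]

jobs:
  vulncheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.24.x
      - name: Get official govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
      - name: Run govulncheck
        # -show verbose expands the report with more detail per finding
        run: govulncheck -show verbose ./...
```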
11 .gitignore (vendored)

@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
 docs/debugging/pprofgoparser/pprofgoparser
 docs/debugging/reorder-disks/reorder-disks
 docs/debugging/populate-hard-links/populate-hardlinks
 docs/debugging/xattr/xattr
+hash-set
+healing-bin
+inspect
+pprofgoparser
+reorder-disks
+s3-check-md5
+s3-verify
+xattr
+xl-meta
.golangci.yml

@@ -1,36 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-
-  misspell:
-    locale: US
-
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
-
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - durationcheck
     - forcetypeassert
     - gocritic
-    - gofumpt
-    - goimports
     - gomodguard
     - govet
     - ineffassign
     - misspell
     - revive
     - staticcheck
-    - tenv
-    - typecheck
     - unconvert
     - unused
-
+    - usetesting
     - whitespace
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
-issues:
-  exclude-use-default: false
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
-  max-issues-per-linter: 100
-  max-same-issues: 100
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
32 .typos.toml

@@ -1,8 +1,5 @@
 [files]
-extend-exclude = [
-    ".git/",
-    "docs/",
-]
+extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
 ignore-hidden = false
 
 [default]
@@ -12,20 +9,37 @@ extend-ignore-re = [
     "[0-9A-Za-z/+=]{64}",
     "ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
     "eyJmb28iOiJiYXIifQ",
     "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
     "MIIDBTCCAe2gAwIBAgIQWHw7h.*",
+    'http\.Header\{"X-Amz-Server-Side-Encryptio":',
+    'sessionToken',
     "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
     "ERRO:",
+    "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line
 ]
 
 [default.extend-words]
 "encrypter" = "encrypter"
 "kms" = "kms"
 "requestor" = "requestor"
 
 [default.extend-identifiers]
-"bui" = "bui"
-"toi" = "toi"
-"ot" = "ot"
 "dm2nd" = "dm2nd"
 "HashiCorp" = "HashiCorp"
+
+[type.go.extend-identifiers]
+"bui" = "bui"
+"dm2nd" = "dm2nd"
+"ot" = "ot"
+"ParseND" = "ParseND"
+"ParseNDStream" = "ParseNDStream"
+"pn" = "pn"
+"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
+"thr" = "thr"
+"toi" = "toi"
+
+[type.go]
+extend-ignore-identifiers-re = [
+    # Variants of `typ` used to mean `type` in golang as it is otherwise a
+    # keyword - some of these (like typ1 -> type1) can be fixed, but probably
+    # not worth the effort.
+    "[tT]yp[0-9]*",
+]
CONTRIBUTING.md

@@ -12,8 +12,9 @@ Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to
 
 ```sh
 git clone https://github.com/minio/minio
 cd minio
 go install -v
-ls /go/bin/minio
+ls $(go env GOPATH)/bin/minio
 ```
 
 ### Set up git remote as ``upstream``
@@ -1,5 +1,7 @@
 FROM minio/minio:latest
 
+RUN chmod -R 777 /usr/bin
+
 COPY ./minio /usr/bin/minio
 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
 
@@ -1,12 +0,0 @@
-FROM minio/minio:latest
-
-ENV PATH=/opt/bin:$PATH
-
-COPY ./minio /opt/bin/minio
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-
-VOLUME ["/data"]
-
-CMD ["minio"]
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine as build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc
 
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
 COPY --from=build /go/bin/cur* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
@@ -1,35 +1,39 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
+
+WORKDIR /build
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
+    apk add -U --no-cache bash && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc
 
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-    curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-    chmod +x /go/bin/curl; \
-    fi
-
 # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
 RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
     minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
 
+COPY dockerscripts/download-static-curl.sh /build/download-static-curl
+RUN chmod +x /build/download-static-curl && \
+    /build/download-static-curl
+
 FROM registry.access.redhat.com/ubi9/ubi-micro:latest
 
 ARG RELEASE
@@ -51,10 +55,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
-COPY --from=build /go/bin/cur* /usr/bin/
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
+COPY --from=build /go/bin/curl* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
 COPY LICENSE /licenses/LICENSE
@@ -1,59 +0,0 @@
-FROM golang:1.21-alpine as build
-
-ARG TARGETARCH
-ARG RELEASE
-
-ENV GOPATH /go
-ENV CGO_ENABLED 0
-
-# Install curl and minisign
-RUN apk add -U --no-cache ca-certificates && \
-    apk add -U --no-cache curl && \
-    go install aead.dev/minisign/cmd/minisign@v0.2.1
-
-# Download minio binary and signature file
-RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
-    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
-    chmod +x /go/bin/minio
-
-RUN if [ "$TARGETARCH" = "amd64" ]; then \
-    curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
-    chmod +x /go/bin/curl; \
-    fi
-
-# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
-RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
-
-FROM registry.access.redhat.com/ubi9/ubi-micro:latest
-
-ARG RELEASE
-
-LABEL name="MinIO" \
-      vendor="MinIO Inc <dev@min.io>" \
-      maintainer="MinIO Inc <dev@min.io>" \
-      version="${RELEASE}" \
-      release="${RELEASE}" \
-      summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
-      description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
-
-ENV MINIO_ACCESS_KEY_FILE=access_key \
-    MINIO_SECRET_KEY_FILE=secret_key \
-    MINIO_ROOT_USER_FILE=access_key \
-    MINIO_ROOT_PASSWORD_FILE=secret_key \
-    MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
-    MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
-    MINIO_CONFIG_ENV_FILE=config.env
-
-COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/cur* /usr/bin/
-
-COPY CREDITS /licenses/CREDITS
-COPY LICENSE /licenses/LICENSE
-COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
-
-EXPOSE 9000
-VOLUME ["/data"]
-
-ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
-CMD ["minio"]
@@ -1,24 +1,26 @@
-FROM golang:1.21-alpine as build
+FROM golang:1.24-alpine AS build
 
 ARG TARGETARCH
 ARG RELEASE
 
-ENV GOPATH /go
-ENV CGO_ENABLED 0
+ENV GOPATH=/go
+ENV CGO_ENABLED=0
 
 # Install curl and minisign
 RUN apk add -U --no-cache ca-certificates && \
     apk add -U --no-cache curl && \
     go install aead.dev/minisign/cmd/minisign@v0.2.1
 
-# Download minio binary and signature file
+# Download minio binary and signature files
 RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \
     curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \
+    curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \
     chmod +x /go/bin/minio
 
-# Download mc binary and signature file
+# Download mc binary and signature files
 RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \
     curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
+    curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \
     chmod +x /go/bin/mc
 
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
@@ -51,9 +53,11 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
     MINIO_CONFIG_ENV_FILE=config.env \
     MC_CONFIG_DIR=/tmp/.mc
 
+RUN chmod -R 777 /usr/bin
+
 COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
-COPY --from=build /go/bin/minio /usr/bin/minio
-COPY --from=build /go/bin/mc /usr/bin/mc
+COPY --from=build /go/bin/minio* /usr/bin/
+COPY --from=build /go/bin/mc* /usr/bin/
 COPY --from=build /go/bin/cur* /usr/bin/
 
 COPY CREDITS /licenses/CREDITS
105
Makefile
105
Makefile
@@ -2,8 +2,8 @@ PWD := $(shell pwd)
|
||||
GOPATH := $(shell go env GOPATH)
|
||||
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
|
||||
|
||||
GOARCH := $(shell go env GOARCH)
|
||||
GOOS := $(shell go env GOOS)
|
||||
GOOS ?= $(shell go env GOOS)
|
||||
GOARCH ?= $(shell go env GOARCH)
|
||||
|
||||
VERSION ?= $(shell git describe --tags)
|
||||
REPO ?= quay.io/minio
|
||||
@@ -24,8 +24,6 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
	@mkdir -p ${GOPATH}/bin
	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
-	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.1.10-0.20240227114326-6d6f813fff1b
-	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

crosscompile: ## cross compile minio
	@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -34,31 +32,47 @@ verifiers: lint check-gen

check-gen: ## check for updated autogenerated files
	@go generate ./... >/dev/null
	@go mod tidy -compat=1.21
	@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
	@(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false)

lint: getdeps ## runs golangci-lint suite of linters
	@echo "Running $@ check"
	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
	@command typos && typos ./ || echo "typos binary is not found.. skipping.."

lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
	@echo "Running $@ check"
	@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix

check: test
-test: verifiers build build-debugging ## builds minio, runs linters, tests
+test: verifiers build ## builds minio, runs linters, tests
	@echo "Running unit tests"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./...

test-root-disable: install-race
	@echo "Running minio root lockdown tests"
	@env bash $(PWD)/buildscripts/disable-root.sh

test-ilm: install-race
	@echo "Running ILM tests"
	@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh

test-ilm-transition: install-race
	@echo "Running ILM tiering tests with healing"
	@env bash $(PWD)/docs/bucket/lifecycle/setup_ilm_transition.sh

test-pbac: install-race
	@echo "Running bucket policies tests"
	@env bash $(PWD)/docs/iam/policies/pbac-tests.sh

test-decom: install-race
	@echo "Running minio decom tests"
	@env bash $(PWD)/docs/distributed/decom.sh
	@env bash $(PWD)/docs/distributed/decom-encrypted.sh
	@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
	@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
	@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh

test-versioning: install-race
	@echo "Running minio versioning tests"
@@ -75,11 +89,23 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
	@echo "Running unit tests under -race"
	@(env bash $(PWD)/buildscripts/race.sh)

-test-iam: build ## verify IAM (external IDP, etcd backends)
+test-iam: install-race ## verify IAM (external IDP, etcd backends)
	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd

+test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
+	@echo "Running upgrade tests for IAM (LDAP backend)"
+	@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
+
+test-iam-import-with-missing-entities: install-race ## test import of external iam config with missing entities
+	@echo "Test IAM import configurations with missing entities"
+	@env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh
+
+test-iam-import-with-openid: install-race
+	@echo "Test IAM import configurations with openid"
+	@env bash $(PWD)/docs/distributed/iam-import-with-openid.sh

test-sio-error:
	@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@@ -93,7 +119,10 @@ test-replication-3site:
test-delete-replication:
	@(env bash $(PWD)/docs/bucket/replication/delete-replication.sh)

-test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication
+test-delete-marker-proxying:
+	@(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh)
+
+test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication
	@echo "Running tests for replicating three sites"

test-site-replication-ldap: install-race ## verify automatic site replication
@@ -114,38 +143,42 @@ test-site-replication-minio: install-race ## verify automatic site replication
	@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
	@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

-verify: ## verify minio various setups
+test-multipart: install-race ## test multipart
+	@echo "Test multipart behavior when part files are missing"
+	@(env bash $(PWD)/buildscripts/multipart-quorum-test.sh)
+
+test-timeout: install-race ## test timeout
+	@echo "Test server timeout"
+	@(env bash $(PWD)/buildscripts/test-timeout.sh)
+
+verify: install-race ## verify minio various setups
	@echo "Verifying build with race"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-build.sh)

-verify-healing: ## verify healing and replacing disks with minio binary
+verify-healing: install-race ## verify healing and replacing disks with minio binary
	@echo "Verify healing build with race"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-healing.sh)
	@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
	@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)

-verify-healing-with-root-disks: ## verify healing root disks
+verify-healing-with-root-disks: install-race ## verify healing root disks
	@echo "Verify healing with root drives"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)

-verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
+verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
	@echo "Verify healing with rewrite"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)

-verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
+verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
	@echo "Verify resolving inconsistent versions build with race"
	@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

build-debugging:
	@(env bash $(PWD)/docs/debugging/build.sh)

-build: checks ## builds minio to $(PWD)
+build: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary to './minio'"
-	@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

hotfix-vars:
	$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
@@ -153,9 +186,9 @@ hotfix-vars:
	$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))

hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
-	@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.9/pkger_2.2.9_linux_amd64.deb
-	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
-	@sudo apt install ./pkger_2.2.9_linux_amd64.deb --yes
+	@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb
+	@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service
+	@sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes
	@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
	@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@@ -165,11 +198,11 @@ hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
	@pkger -r $(VERSION) --ignore

hotfix-push: hotfix
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
-	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
-	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
+	@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
+	@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)"

docker-hotfix-push: docker-hotfix
	@docker push -q $(TAG) && echo "Published new container $(TAG)"
@@ -182,15 +215,19 @@ docker: build ## builds minio docker container
	@echo "Building minio docker image '$(TAG)'"
	@docker build -q --no-cache -t $(TAG) . -f Dockerfile

-install-race: checks ## builds minio to $(PWD)
+test-resiliency: build
+	@echo "Running resiliency tests"
+	@(DOCKER_COMPOSE_FILE=$(PWD)/docs/resiliency/docker-compose.yaml env bash $(PWD)/docs/resiliency/resiliency-tests.sh)
+
+install-race: checks build-debugging ## builds minio to $(PWD)
	@echo "Building minio binary with -race to './minio'"
-	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
+	@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
	@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio

install: build ## builds minio and installs it to $GOPATH/bin.
	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
	@echo "Installation successful. To learn more, try \"minio --help\"."

clean: ## cleanup all generated assets
README.md
@@ -4,7 +4,7 @@

[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)

-MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
+MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).

This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

@@ -93,7 +93,6 @@ The following table lists supported architectures. Replace the `wget` URL with t
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
| IBM Z-Series (S390X) | <https://dl.min.io/server/minio/release/linux-s390x/minio> |

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

@@ -123,7 +122,7 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`

## Install from Source

-Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable)
+Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)

```sh
go install github.com/minio/minio@latest
@@ -210,10 +209,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h

For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.

Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.

For example: `export MINIO_SERVER_URL="https://minio.example.net"`

-| Dashboard | Creating a bucket |
-| ------------- | ------------- |
-|  |  |
buildscripts/checkdeps.sh (0 changes, Normal file → Executable file)
@@ -32,6 +32,7 @@ fi
set +e

export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
+./mc ready minioadm

./mc ls minioadm/

@@ -56,7 +57,7 @@ done

set +e

-sleep 10
+./mc ready minioadm/

./mc ls minioadm/
if [ $? -ne 0 ]; then
@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

-sleep 20s

export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004

+./mc ready sitea
+./mc ready siteb

./mc admin replicate add sitea siteb

./mc admin user add sitea foobar foo12345
@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
	"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

-sleep 20s

export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004

+./mc ready sitea
+./mc ready siteb

./mc admin user add sitea foobar-admin foo12345

sleep 2s
buildscripts/minio-iam-ldap-upgrade-import-test.sh (127 lines, new executable file)
@@ -0,0 +1,127 @@
#!/bin/bash

# This script is used to test the migration of IAM content from old minio
# instance to new minio instance.
#
# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that LDAP server is at:
#
# `localhost:389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.

OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}

__init__() {
	if which curl &>/dev/null; then
		echo "curl is already installed"
	else
		echo "Installing curl:"
		sudo apt install curl -y
	fi

	export GOPATH=/tmp/gopath
	export PATH="${PATH}":"${GOPATH}"/bin

	if which mc &>/dev/null; then
		echo "mc is already installed"
	else
		echo "Installing mc:"
		go install github.com/minio/mc@latest
	fi

	if [ ! -x ./minio.${OLD_VERSION} ]; then
		echo "Downloading minio.${OLD_VERSION} binary"
		curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
		chmod +x minio.${OLD_VERSION}
	fi

	if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
		export _MINIO_LDAP_TEST_SERVER=localhost:389
		echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
	fi

	rm -rf /tmp/data
}

create_iam_content_in_old_minio() {
	echo "Creating IAM content in old minio instance."

	MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
	sleep 5

	set -x
	mc alias set old-minio http://localhost:9000 minioadmin minioadmin
	mc ready old-minio
	mc idp ldap add old-minio \
		server_addr=localhost:389 \
		server_insecure=on \
		lookup_bind_dn=cn=admin,dc=min,dc=io \
		lookup_bind_password=admin \
		user_dn_search_base_dn=dc=min,dc=io \
		user_dn_search_filter="(uid=%s)" \
		group_search_base_dn=ou=swengg,dc=min,dc=io \
		group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
	mc admin service restart old-minio

	mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
	mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io

	mc idp ldap policy entities old-minio

	mc admin cluster iam export old-minio
	set +x

	mc admin service stop old-minio
}

import_iam_content_in_new_minio() {
	echo "Importing IAM content in new minio instance."
	# Assume current minio binary exists.
	MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
	sleep 5

	set -x
	mc alias set new-minio http://localhost:9000 minioadmin minioadmin
	echo "BEFORE IMPORT mappings:"
	mc ready new-minio
	mc idp ldap policy entities new-minio
	mc admin cluster iam import new-minio ./old-minio-iam-info.zip
	echo "AFTER IMPORT mappings:"
	mc idp ldap policy entities new-minio
	set +x

	# mc admin service stop new-minio
}

verify_iam_content_in_new_minio() {
	output=$(mc idp ldap policy entities new-minio --json)

	groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
	if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
		echo "Failed to verify groups: $groups"
		exit 1
	fi

	users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
	if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
		echo "Failed to verify users: $users"
		exit 1
	fi

	mc admin service stop new-minio
}

main() {
	create_iam_content_in_old_minio

	import_iam_content_in_new_minio

	verify_iam_content_in_new_minio
}

(__init__ "$@" && main "$@")
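
The verification step above relies on the JSON shape emitted by `mc idp ldap policy entities --json`. A hedged Go sketch of the same check, with struct fields inferred only from the jq queries in the script:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type entitiesResult struct {
	Result struct {
		PolicyMappings []struct {
			Policy string   `json:"policy"`
			Users  []string `json:"users"`
			Groups []string `json:"groups"`
		} `json:"policyMappings"`
	} `json:"result"`
}

func main() {
	// Sample payload shaped like what the script's jq pipeline expects.
	raw := []byte(`{"result":{"policyMappings":[{"policy":"readwrite",
		"users":["uid=dillon,ou=people,ou=swengg,dc=min,dc=io"],
		"groups":["cn=project.c,ou=groups,ou=swengg,dc=min,dc=io"]}]}}`)

	var out entitiesResult
	if err := json.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}
	for _, m := range out.Result.PolicyMappings {
		if m.Policy == "readwrite" {
			fmt.Println("users:", m.Users, "groups:", m.Groups)
		}
	}
}
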
buildscripts/minio-upgrade.sh (29 changes, Normal file → Executable file)
@@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR

# shellcheck disable=SC2120
cleanup() {
-	MINIO_VERSION=dev docker-compose \
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
		-f "buildscripts/upgrade-tests/compose.yml" \
-		rm -s -f
+		down || true

+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \
+		-f "buildscripts/upgrade-tests/compose.yml" \
+		rm || true

	for volume in $(docker volume ls -q | grep upgrade); do
		docker volume rm ${volume} || true
	done

-	docker volume prune -f
+	docker system prune -f || true
+	docker volume prune -f || true
+	docker volume rm $(docker volume ls -q -f dangling=true) || true
}

verify_checksum_after_heal() {
@@ -55,6 +67,15 @@ __init__() {

	go install github.com/minio/mc@latest

+	## this is needed because github actions don't have
+	## docker-compose on all runners
+	COMPOSE_VERSION=v2.35.1
+	mkdir -p /tmp/gopath/bin/
+	wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64
+	chmod +x /tmp/gopath/bin/docker-compose

	cleanup

	TAG=minio/minio:dev make docker

	MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -72,11 +93,11 @@ __init__() {

	curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum

-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop
}

main() {
-	MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build
+	MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build

	add_alias

buildscripts/multipart-quorum-test.sh (126 lines, new file)
@@ -0,0 +1,126 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function start_minio_10drive() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 5

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	"${PWD}/mc" mb --with-versioning minio/bucket

	export AWS_ACCESS_KEY_ID=minio
	export AWS_SECRET_ACCESS_KEY=minio123
	aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json
	uploadId=$(jq -r '.UploadId' upload-id.json)

	truncate -s 5MiB file-5mib
	for i in {1..2}; do
		aws --endpoint-url http://localhost:"$start_port" s3api upload-part \
			--upload-id "$uploadId" --bucket bucket --key obj-1 \
			--part-number "$i" --body ./file-5mib
	done
	for i in {1..6}; do
		find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete
	done
	cat <<EOF >parts.json
{
    "Parts": [
        {
            "PartNumber": 1,
            "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
        },
        {
            "PartNumber": 2,
            "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6"
        }
    ]
}
EOF
	err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1)
	rv=$?
	if [ $rv -eq 0 ]; then
		echo "Failed to receive an error"
		exit 1
	fi
	echo "Received an error during complete-multipart as expected: $err"
}

function main() {
	start_port=$(shuf -i 10000-65000 -n 1)
	start_minio_10drive ${start_port}
}

main "$@"
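
The ETags hard-coded in parts.json are simply the MD5 of the 5 MiB zero-filled part that `truncate -s 5MiB` produces, since S3 part ETags are the MD5 of the part body. A quick Go check:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// 5 MiB of zero bytes, matching the sparse file truncate creates.
	part := make([]byte, 5<<20)
	// Should print the ETag value used twice in parts.json above.
	fmt.Printf("%x\n", md5.Sum(part))
}
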
buildscripts/rewrite-old-new.sh
@@ -45,7 +45,8 @@ function verify_rewrite() {
	"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 10

+	"${WORK_DIR}/mc" ready minio/

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
@@ -77,7 +78,8 @@ function verify_rewrite() {
	"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 10

+	"${WORK_DIR}/mc" ready minio/

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
@@ -87,17 +89,12 @@ function verify_rewrite() {
		exit 1
	fi

-	(
-		cd ./docs/debugging/s3-check-md5
-		go install -v
-	)
-
-	if ! s3-check-md5 \
+	if ! ./s3-check-md5 \
		-debug \
		-versions \
		-access-key minio \
		-secret-key minio123 \
-		-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
+		-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
@@ -117,7 +114,7 @@ function verify_rewrite() {
	go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
	sleep 1

-	if ! s3-check-md5 \
+	if ! ./s3-check-md5 \
		-debug \
		-versions \
		-access-key minio \
buildscripts/test-timeout.sh (137 lines, new file)
@@ -0,0 +1,137 @@
#!/bin/bash

if [ -n "$TEST_DEBUG" ]; then
	set -x
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

trap 'catch $LINENO' ERR

function purge() {
	rm -rf "$1"
}

# shellcheck disable=SC2120
catch() {
	if [ $# -ne 0 ]; then
		echo "error on line $1"
	fi

	echo "Cleaning up instances of MinIO"
	pkill minio || true
	pkill -9 minio || true
	purge "$WORK_DIR"
	if [ $# -ne 0 ]; then
		exit $#
	fi
}

catch

function gen_put_request() {
	hdr_sleep=$1
	body_sleep=$2

	echo "PUT /testbucket/testobject HTTP/1.1"
	sleep $hdr_sleep
	echo "Host: foo-header"
	echo "User-Agent: curl/8.2.1"
	echo "Accept: */*"
	echo "Content-Length: 30"
	echo ""

	sleep $body_sleep
	echo "random line 0"
	echo "random line 1"
	echo ""
	echo ""
}

function send_put_object_request() {
	hdr_timeout=$1
	body_timeout=$2

	start=$(date +%s)
	timeout 5m bash -c "gen_put_request $hdr_timeout $body_timeout | netcat 127.0.0.1 $start_port | read" || return -1
	[ $(($(date +%s) - start)) -gt $((srv_hdr_timeout + srv_idle_timeout + 1)) ] && return -1
	return 0
}

function test_minio_with_timeout() {
	start_port=$1

	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/"
	export MINIO_CI_CD=1

	mkdir ${WORK_DIR}
	C_PWD=${PWD}
	if [ ! -x "$PWD/mc" ]; then
		MC_BUILD_DIR="mc-$RANDOM"
		if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
			echo "failed to download https://github.com/minio/mc"
			purge "${MC_BUILD_DIR}"
			exit 1
		fi

		(cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc")

		# remove mc source.
		purge "${MC_BUILD_DIR}"
	fi

	"${MINIO[@]}" --address ":$start_port" --read-header-timeout ${srv_hdr_timeout}s --idle-timeout ${srv_idle_timeout}s "${WORK_DIR}/disk/" >"${WORK_DIR}/server1.log" 2>&1 &
	pid=$!
	disown $pid
	sleep 1

	if ! ps -p ${pid} 1>&2 >/dev/null; then
		echo "server1 log:"
		cat "${WORK_DIR}/server1.log"
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	set -e

	"${PWD}/mc" mb minio/testbucket
	"${PWD}/mc" anonymous set public minio/testbucket

	# slow header writing
	send_put_object_request 20 0 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header write and slow body write
	send_put_object_request 0 40 && exit -1
	"${PWD}/mc" stat minio/testbucket/testobject && exit -1

	# quick header and body write
	send_put_object_request 1 1 || exit -1
	"${PWD}/mc" stat minio/testbucket/testobject || exit -1
}

function main() {
	export start_port=$(shuf -i 10000-65000 -n 1)
	export srv_hdr_timeout=5
	export srv_idle_timeout=5
	export -f gen_put_request

	test_minio_with_timeout ${start_port}
}

main "$@"
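
The netcat trick above can also be reproduced in Go. This sketch (address and timing assumed to match the script's defaults) opens a raw connection, stalls mid-header, and observes the server cutting it off once `--read-header-timeout` expires:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Point this at a server started with --read-header-timeout 5s.
	conn, err := net.Dial("tcp", "127.0.0.1:9000")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Send the request line, then stall well past the 5s header deadline.
	fmt.Fprint(conn, "PUT /testbucket/testobject HTTP/1.1\r\n")
	time.Sleep(20 * time.Second)

	// A server enforcing ReadHeaderTimeout should have dropped the
	// connection by now, so this write or the following read fails.
	fmt.Fprint(conn, "Host: 127.0.0.1\r\n\r\n")
	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	fmt.Printf("read %d bytes, err=%v\n", n, err)
}
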
buildscripts/upgrade-tests/compose.yml
@@ -1,5 +1,3 @@
-version: '3.7'
-
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
  image: minio/minio:${MINIO_VERSION}
buildscripts/verify-build.sh
@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@@ -36,18 +37,21 @@ function start_minio_fs() {
	export MINIO_ROOT_USER=$ACCESS_KEY
	export MINIO_ROOT_PASSWORD=$SECRET_KEY
	"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
	sleep 10

+	"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure() {
	"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
	sleep 15

+	"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure_sets() {
	export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
	"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
	sleep 15

+	"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets() {
@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
	"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
	"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &

-	sleep 40
+	"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets_ipv6() {
@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
	"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
	"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

-	sleep 40
+	"${WORK_DIR}/mc" ready verify_ipv6
}

function start_minio_dist_erasure() {
@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
	"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
	done

-	sleep 40
+	"${WORK_DIR}/mc" ready verify
}

function run_test_fs() {
@@ -222,7 +226,7 @@ function __init__() {
		exit 1
	fi

-	(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
+	(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")

	# remove mc source.
	purge "${MC_BUILD_DIR}"
buildscripts/verify-healing-empty-erasure-set.sh (151 lines, new executable file)
@@ -0,0 +1,151 @@
#!/bin/bash -e
#

set -E
set -o pipefail

if [ ! -x "$PWD/minio" ]; then
	echo "minio executable binary not found in current directory"
	exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

function start_minio_3_node() {
	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MINIO_ERASURE_SET_DRIVE_COUNT=6
	export MINIO_CI_CD=1

	start_port=$1
	args=""
	for i in $(seq 1 3); do
		args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
	done

	"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
	pid1=$!
	disown ${pid1}

	"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
	pid2=$!
	disown $pid2

	"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
	pid3=$!
	disown $pid3

	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"

	timeout 15m /tmp/mc ready myminio || fail

	# Wait for all drives to be online and formatted
	while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
	# Wait for all drives to be healed
	while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done

	# Wait for Status: in MinIO output
	while true; do
		rv=$(check_online)
		if [ "$rv" != "1" ]; then
			# success
			break
		fi

		# Check if we should retry
		retry=$((retry + 1))
		if [ $retry -le 20 ]; then
			sleep 5
			continue
		fi

		# Failure
		fail
	done

	if ! ps -p $pid1 1>&2 >/dev/null; then
		echo "minio-server-1 is not running." && fail
	fi

	if ! ps -p $pid2 1>&2 >/dev/null; then
		echo "minio-server-2 is not running." && fail
	fi

	if ! ps -p $pid3 1>&2 >/dev/null; then
		echo "minio-server-3 is not running." && fail
	fi

	if ! pkill minio; then
		fail
	fi

	sleep 1
	if pgrep minio; then
		# forcibly killing, to proceed further properly.
		if ! pkill -9 minio; then
			echo "no minio process running anymore, proceed."
		fi
	fi
}

function fail() {
	for i in $(seq 1 3); do
		echo "server$i log:"
		cat "${WORK_DIR}/dist-minio-server$i.log"
	done
	echo "FAILED"
	purge "$WORK_DIR"
	exit 1
}

function check_online() {
	if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
		echo "1"
	fi
}

function purge() {
	echo rm -rf "$1"
}

function __init__() {
	echo "Initializing environment"
	mkdir -p "$WORK_DIR"
	mkdir -p "$MINIO_CONFIG_DIR"

	## version is purposefully set to '3' for minio to migrate configuration file
	echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

	if [ ! -f /tmp/mc ]; then
		wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
			chmod +x /tmp/mc
	fi
}

function perform_test() {
	start_minio_3_node $2

	echo "Testing Distributed Erasure setup healing of drives"
	echo "Remove the contents of the disks belonging to '${1}' erasure set"

	rm -rf ${WORK_DIR}/${1}/*/

	set -x
	start_minio_3_node $2
}

function main() {
	# use same ports for all tests
	start_port=$(shuf -i 10000-65000 -n 1)

	perform_test "2" ${start_port}
	perform_test "1" ${start_port}
	perform_test "3" ${start_port}
}

(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
buildscripts/verify-healing.sh
@@ -12,17 +12,26 @@ fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
+GOPATH=/tmp/gopath

function start_minio_3_node() {
+	for i in $(seq 1 3); do
+		rm "${WORK_DIR}/dist-minio-server$i.log"
+	done
+
	export MINIO_ROOT_USER=minio
	export MINIO_ROOT_PASSWORD=minio123
	export MINIO_ERASURE_SET_DRIVE_COUNT=6
	export MINIO_CI_CD=1

-	start_port=$2
+	first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)
+
+	start_port=$1
	args=""
-	for i in $(seq 1 3); do
-		args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
+	for d in $(seq 1 3 5); do
+		args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
+		d=$((d + 1))
+		args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
	done

	"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
@@ -37,40 +46,26 @@ function start_minio_3_node() {
	pid3=$!
	disown $pid3

-	sleep "$1"
+	export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
+	timeout 15m /tmp/mc ready myminio || fail
+
+	[ ${first_time} -eq 0 ] && upload_objects
+	[ ${first_time} -ne 0 ] && sleep 120

	if ! ps -p $pid1 1>&2 >/dev/null; then
-		echo "server1 log:"
-		cat "${WORK_DIR}/dist-minio-server1.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 1 is not running" && fail
	fi

	if ! ps -p $pid2 1>&2 >/dev/null; then
-		echo "server2 log:"
-		cat "${WORK_DIR}/dist-minio-server2.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 2 is not running" && fail
	fi

	if ! ps -p $pid3 1>&2 >/dev/null; then
-		echo "server3 log:"
-		cat "${WORK_DIR}/dist-minio-server3.log"
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		echo "minio server 3 is not running" && fail
	fi

	if ! pkill minio; then
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	fi

	sleep 1
@@ -82,16 +77,40 @@ function start_minio_3_node() {
	fi
}

-function check_online() {
-	if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
-		echo "1"
+function check_heal() {
+	if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
+		return 1
	fi

+	for ((i = 0; i < 20; i++)); do
+		test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
+		v1=$?
+		nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
+		foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
+		foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
+		test $foundFiles1 -eq $foundFiles2
+		v2=$?
+		[ $v1 == 0 -a $v2 == 0 ] && return 0
+		sleep 10
+	done
+	return 1
}

function purge() {
	rm -rf "$1"
}

+function fail() {
+	for i in $(seq 1 3); do
+		echo "server$i log:"
+		cat "${WORK_DIR}/dist-minio-server$i.log"
+	done
+	pkill -9 minio
+	echo "FAILED"
+	purge "$WORK_DIR"
+	exit 1
+}
+
function __init__() {
	echo "Initializing environment"
	mkdir -p "$WORK_DIR"
@@ -99,29 +118,37 @@ function __init__() {

	## version is purposefully set to '3' for minio to migrate configuration file
	echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

+	if [ ! -f /tmp/mc ]; then
+		wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
+			chmod +x /tmp/mc
+	fi
}

+function upload_objects() {
+	/tmp/mc mb myminio/testbucket/
+	for ((i = 0; i < 20; i++)); do
+		echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
+	done
+}
+
function perform_test() {
-	start_minio_3_node 120 $2
+	start_port=$2
+
+	start_minio_3_node $start_port

	echo "Testing Distributed Erasure setup healing of drives"
-	echo "Remove the contents of the disks belonging to '${1}' erasure set"
+	echo "Remove the contents of the disks belonging to '${1}' node"

	rm -rf ${WORK_DIR}/${1}/*/

	set -x
-	start_minio_3_node 120 $2
+	start_minio_3_node $start_port

-	rv=$(check_online)
+	check_heal ${1}
+	rv=$?
	if [ "$rv" == "1" ]; then
-		for i in $(seq 1 3); do
-			echo "server$i log:"
-			cat "${WORK_DIR}/dist-minio-server$i.log"
-		done
-		pkill -9 minio
-		echo "FAILED"
-		purge "$WORK_DIR"
-		exit 1
+		fail
	fi
}
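
check_heal decides that healing converged when two nodes hold the same number of xl.meta files outside .minio.sys. The same comparison in Go, as an illustrative sketch with placeholder drive paths:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

// countXLMeta mirrors the find|grep pipeline in check_heal: count xl.meta
// files under a drive path, skipping the .minio.sys metadata tree.
func countXLMeta(root string) (int, error) {
	n := 0
	err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && strings.HasSuffix(p, ".minio.sys") {
			return filepath.SkipDir
		}
		if !d.IsDir() && d.Name() == "xl.meta" {
			n++
		}
		return nil
	})
	return n, err
}

func main() {
	a, _ := countXLMeta("/tmp/work/1/1") // drive paths are placeholders
	b, _ := countXLMeta("/tmp/work/2/1")
	fmt.Println("object counts match:", a == b)
}
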
@@ -25,7 +25,7 @@ import (
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
)

// Data types used for returning dummy access control
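
This and the following hunks bump github.com/minio/pkg from v2 to v3. Under Go's semantic import versioning the major version is part of the import path, so the go.mod requirement and every import site must move together. A minimal sketch (policy.Args is an exported type of that package; exact module versions are assumed):

package main

import (
	"fmt"

	"github.com/minio/pkg/v3/policy" // was: github.com/minio/pkg/v2/policy
)

func main() {
	// go.mod must gain `require github.com/minio/pkg/v3 vX.Y.Z`, and may drop
	// the v2 line once no file imports it; `go mod tidy` reconciles both.
	fmt.Printf("%T\n", policy.Args{})
}
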
@@ -38,10 +38,10 @@ import (
	objectlock "github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/event"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
)

const (
@@ -99,7 +99,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
	}

	// Call site replication hook.
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))

	// Write success response.
	writeSuccessResponseHeadersOnly(w)
@@ -428,10 +428,25 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
		cfgPath := pathJoin(bi.Name, cfgFile)
		bucket := bi.Name
		switch cfgFile {
+		case bucketPolicyConfig:
+			config, _, err := globalBucketMetadataSys.GetBucketPolicy(bucket)
+			if err != nil {
+				if errors.Is(err, BucketPolicyNotFound{Bucket: bucket}) {
+					continue
+				}
+				writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
+				return
+			}
+			configData, err := json.Marshal(config)
+			if err != nil {
+				writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
+				return
+			}
+			rawDataFn(bytes.NewReader(configData), cfgPath, len(configData))
		case bucketNotificationConfig:
			config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
			if err != nil {
-				logger.LogIf(ctx, err)
+				adminLogIf(ctx, err)
				writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
				return
			}
@@ -447,7 +462,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
			if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
				continue
			}
-			logger.LogIf(ctx, err)
+			adminLogIf(ctx, err)
			writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
			return
		}
@@ -736,7 +751,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
			continue
		}
-		if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
+		if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
			rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
			continue
		}
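
The rewritten condition above matters because a config lookup can plausibly return no configuration and no error at the same time, and `err == nil` would then misreport a replication config as present. A contrived sketch of the distinction (names hypothetical, not the actual getReplicationConfig):

package main

import "fmt"

type replConfig struct{ rules int }

// lookup stands in for a config getter that can legitimately return
// (nil, nil) when the bucket simply has no replication configuration.
func lookup(configured bool) (*replConfig, error) {
	if !configured {
		return nil, nil
	}
	return &replConfig{rules: 1}, nil
}

func main() {
	cfg, err := lookup(false)
	fmt.Println("err == nil:", err == nil) // true: the old check would fire
	fmt.Println("cfg != nil:", cfg != nil) // false: the new check stays quiet
}
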
@@ -784,7 +799,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
		}
		switch fileName {
		case bucketNotificationConfig:
-			config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
+			config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
			if err != nil {
				rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
				continue
@@ -797,11 +812,12 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			}

			bucketMap[bucket].NotificationConfigXML = configData
+			bucketMap[bucket].NotificationConfigUpdatedAt = updatedAt
			rpt.SetStatus(bucket, fileName, nil)
		case bucketPolicyConfig:
			// Error out if Content-Length is beyond allowed size.
			if sz > maxBucketPolicySize {
-				rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String()))
+				rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyTooLarge.String()))
				continue
			}

@@ -819,7 +835,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *

			// Version in policy must not be empty
			if bucketPolicy.Version == "" {
-				rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String()))
+				rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyInvalidVersion.String()))
				continue
			}
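
Passing a prebuilt message to fmt.Errorf trips go vet's printf check and risks mangling any `%` in the text, which is why these call sites switch to errors.New. A short illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Imagine this string came from an error-code table, as above.
	msg := "policy uses 100% of the size limit"
	fmt.Println(fmt.Errorf(msg))  // vet warning; "% o" is parsed as a format verb
	fmt.Println(errors.New(msg))  // message preserved verbatim
}
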
@@ -838,9 +854,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
				rpt.SetStatus(bucket, fileName, err)
				continue
			}

+			rcfg, err := globalBucketObjectLockSys.Get(bucket)
+			if err != nil {
+				rpt.SetStatus(bucket, fileName, err)
+				continue
+			}
			// Validate the received bucket policy document
-			if err = bucketLifecycle.Validate(); err != nil {
+			if err = bucketLifecycle.Validate(rcfg); err != nil {
				rpt.SetStatus(bucket, fileName, err)
				continue
			}
@@ -875,8 +895,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			}
			kmsKey := encConfig.KeyID()
			if kmsKey != "" {
-				kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
-				_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
+				_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
+					Name:           kmsKey,
+					AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
+				})
				if err != nil {
					if errors.Is(err, kes.ErrKeyNotFound) {
						rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
@@ -959,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
			rpt.SetStatus(bucket, "", err)
			continue
		}
-
	}

	rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
@@ -1018,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
			}
			if len(diffCh) == 0 {
				// Flush if nothing is queued
-				w.(http.Flusher).Flush()
+				xhttp.Flush(w)
			}
		case <-keepAliveTicker.C:
			if len(diffCh) > 0 {
@@ -1027,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
-			w.(http.Flusher).Flush()
+			xhttp.Flush(w)
		case <-ctx.Done():
			return
		}
@@ -1077,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
			}
			if len(mrfCh) == 0 {
				// Flush if nothing is queued
-				w.(http.Flusher).Flush()
+				xhttp.Flush(w)
			}
		case <-keepAliveTicker.C:
			if len(mrfCh) > 0 {
@@ -1086,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
-			w.(http.Flusher).Flush()
+			xhttp.Flush(w)
		case <-ctx.Done():
			return
		}
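
The bare assertion `w.(http.Flusher).Flush()` panics when a wrapping ResponseWriter does not implement Flusher, which is presumably what the xhttp.Flush helper guards against. One plausible shape for it (the real implementation may differ):

package main

import "net/http"

// Flush flushes buffered data if, and only if, the writer supports it;
// the comma-ok assertion avoids the panic a bare w.(http.Flusher) risks.
func Flush(w http.ResponseWriter) {
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}

func main() {} // compile-only sketch
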
@@ -27,7 +27,7 @@ import (
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// validateAdminReq will validate request against and return whether it is allowed.
|
||||
@@ -216,6 +216,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
case errors.Is(err, errTierInvalidConfig):
|
||||
apiErr = APIError{
|
||||
Code: "XMinioAdminTierInvalidConfig",
|
||||
Description: err.Error(),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}
|
||||
default:
|
||||
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
"github.com/minio/minio/internal/config/subnet"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
|
||||
@@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
@@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
|
||||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -31,10 +31,9 @@ import (
	"github.com/minio/minio/internal/config"
	cfgldap "github.com/minio/minio/internal/config/identity/ldap"
	"github.com/minio/minio/internal/config/identity/openid"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/ldap"
	"github.com/minio/pkg/v2/policy"
	"github.com/minio/pkg/v3/ldap"
	"github.com/minio/pkg/v3/policy"
)

func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
@@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
	password := cred.SecretKey
	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.ErrorKind)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}
@@ -126,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
	}

	if err = validateConfig(ctx, cfg, subSys); err != nil {

		var validationErr ldap.Validation
		if errors.As(err, &validationErr) {
			// If we got an LDAP validation error, we need to send appropriate
@@ -417,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht
		return
	}
	if err = validateConfig(ctx, cfg, subSys); err != nil {

		var validationErr ldap.Validation
		if errors.As(err, &validationErr) {
			// If we got an LDAP validation error, we need to send appropriate

@@ -27,9 +27,9 @@ import (

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/policy"
	xldap "github.com/minio/pkg/v3/ldap"
	"github.com/minio/pkg/v3/policy"
)

// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@@ -105,6 +105,12 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
		return
	}

	// fail if ldap is not enabled
	if !globalIAMSys.LDAPConfig.Enabled() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
		return
	}

	if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
		// More than maxConfigSize bytes were available
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -132,7 +138,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
	password := cred.SecretKey
	reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.ErrorKind)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}
@@ -184,7 +190,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
//
// PUT /minio/admin/v3/idp/ldap/add-service-account
func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) {
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r)
	ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true)
	if APIError.Code != "" {
		writeErrorResponseJSON(ctx, w, APIError, r.URL)
		return
@@ -192,7 +198,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R

	// fail if ldap is not enabled
	if !globalIAMSys.LDAPConfig.Enabled() {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("LDAP not enabled")), r.URL)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
		return
	}

@@ -208,19 +214,15 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
	}

	// Check if we are creating svc account for request sender.
	isSvcAccForRequestor := false
	if targetUser == requestorUser || targetUser == requestorParentUser {
		isSvcAccForRequestor = true
	}
	isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser

	var (
		targetGroups []string
		err          error
	)

	// If we are creating svc account for request sender, ensure
	// that targetUser is a real user (i.e. not derived
	// credentials).
	// If we are creating svc account for request sender, ensure that targetUser
	// is a real user (i.e. not derived credentials).
	if isSvcAccForRequestor {
		if requestorIsDerivedCredential {
			if requestorParentUser == "" {
@@ -233,12 +235,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
		targetGroups = requestorGroups

		// Deny if the target user is not LDAP
		foundLDAPDN, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
		foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		if foundLDAPDN == "" {
		if foundResult == nil {
			err := errors.New("Specified user does not exist on LDAP server")
			APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
			writeErrorResponseJSON(ctx, w, APIErr, r.URL)
@@ -254,20 +256,48 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
			opts.claims[k] = v
		}
	} else {
		isDN := globalIAMSys.LDAPConfig.IsLDAPUserDN(targetUser)
		// We still need to ensure that the target user is a valid LDAP user.
		//
		// The target user may be supplied as a (short) username or a DN.
		// However, for now, we only support using the short username.

		isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
		opts.claims[ldapUserN] = targetUser // simple username
		targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
		var lookupResult *xldap.DNSearchResult
		lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
		if err != nil {
			// if not found, check if DN
			if strings.Contains(err.Error(), "not found") && isDN {
				// warn user that DNs are not allowed
				err = fmt.Errorf("Must use short username to add service account. %w", err)
			if strings.Contains(err.Error(), "User DN not found for:") {
				if isDN {
					// warn user that DNs are not allowed
					writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL)
				} else {
					writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
				}
			}
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		targetUser = lookupResult.NormDN
		opts.claims[ldapUser] = targetUser // DN
		opts.claims[ldapActualUser] = lookupResult.ActualDN

		// Check if this user or their groups have a policy applied.
		ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		if len(ldapPolicies) == 0 {
			err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`"))
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
			return
		}

		// Add LDAP attributes that were looked up into the claims.
		for attribKey, attribValue := range lookupResult.Attributes {
			opts.claims[ldapAttribPrefix+attribKey] = attribValue
		}
	}

	newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
@@ -301,7 +331,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
	// Call hook for cluster-replication if the service account is not for a
	// root user.
	if newCred.ParentUser != globalActiveCred.AccessKey {
		logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
		replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
			Type: madmin.SRIAMItemSvcAcc,
			SvcAccChange: &madmin.SRSvcAccChange{
				Create: &madmin.SRSvcAccCreate{
@@ -312,7 +342,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
					Name:          newCred.Name,
					Description:   newCred.Description,
					Claims:        opts.claims,
					SessionPolicy: createReq.Policy,
					SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
					Status:        auth.AccountOn,
					Expiration:    createReq.Expiration,
				},
@@ -374,14 +404,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
		}
	}

	targetAccount, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
	dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	} else if userDN == "" {
	}
	if dnResult == nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
		return
	}
	targetAccount := dnResult.NormDN

	listType := r.Form.Get("listType")
	if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
@@ -444,3 +476,178 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ

	writeSuccessResponseJSON(w, encryptedData)
}

// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	dnList := r.Form["userDNs"]
	isAll := r.Form.Get("all") == "true"
	selfOnly := !isAll && len(dnList) == 0

	if isAll && len(dnList) > 0 {
		// This should be checked on client side, so return generic error
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Empty DN list and not self, list access keys for all users
	if isAll {
		if !globalIAMSys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.ListUsersAdminAction,
			ConditionValues: getConditionValues(r, "", cred),
			IsOwner:         owner,
			Claims:          cred.Claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else if len(dnList) == 1 {
		var dn string
		foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
		if err == nil {
			dn = foundResult.NormDN
		}
		if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
			selfOnly = true
		}
	}

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
		DenyOnly:        selfOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	if selfOnly && len(dnList) == 0 {
		selfDN := cred.AccessKey
		if cred.ParentUser != "" {
			selfDN = cred.ParentUser
		}
		dnList = append(dnList, selfDN)
	}

	var ldapUserList []string
	if isAll {
		ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for user := range ldapUsers {
			ldapUserList = append(ldapUserList, user)
		}
	} else {
		for _, userDN := range dnList {
			// Validate the userDN
			foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			if foundResult == nil {
				continue
			}
			ldapUserList = append(ldapUserList, foundResult.NormDN)
		}
	}

	listType := r.Form.Get("listType")
	var listSTSKeys, listServiceAccounts bool
	switch listType {
	case madmin.AccessKeyListUsersOnly:
		listSTSKeys = false
		listServiceAccounts = false
	case madmin.AccessKeyListSTSOnly:
		listSTSKeys = true
		listServiceAccounts = false
	case madmin.AccessKeyListSvcaccOnly:
		listSTSKeys = false
		listServiceAccounts = true
	case madmin.AccessKeyListAll:
		listSTSKeys = true
		listServiceAccounts = true
	default:
		err := errors.New("invalid list type")
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}

	accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
	for _, internalDN := range ldapUserList {
		externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN)
		accessKeys := madmin.ListAccessKeysLDAPResp{}
		if listSTSKeys {
			stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, sts := range stsKeys {
				accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
					AccessKey:  sts.AccessKey,
					Expiration: &sts.Expiration,
				})
			}
			// if only STS keys, skip if user has no STS keys
			if !listServiceAccounts && len(stsKeys) == 0 {
				continue
			}
		}

		if listServiceAccounts {
			serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN)
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			for _, svc := range serviceAccounts {
				accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
					AccessKey:  svc.AccessKey,
					Expiration: &svc.Expiration,
				})
			}
			// if only service accounts, skip if user has no service accounts
			if !listSTSKeys && len(serviceAccounts) == 0 {
				continue
			}
		}
		accessKeyMap[externalDN] = accessKeys
	}

	data, err := json.Marshal(accessKeyMap)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
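
A note on consuming this endpoint: as with the config handlers earlier in this diff, the response body is not plain JSON; it is encrypted with the requesting credential's secret key via madmin.EncryptData. A minimal client-side sketch of the reverse path, assuming body holds the raw response from /minio/admin/v3/idp/ldap/list-access-keys-bulk and secretKey is the caller's secret key (both names are illustrative):

// Sketch: decrypt and decode the bulk LDAP access-key listing.
// madmin.DecryptData mirrors the handler's madmin.EncryptData call;
// the payload is a map keyed by each user's (decoded) DN.
func decodeLDAPBulk(secretKey string, body []byte) (map[string]madmin.ListAccessKeysLDAPResp, error) {
	raw, err := madmin.DecryptData(secretKey, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	out := make(map[string]madmin.ListAccessKeysLDAPResp)
	if err := json.Unmarshal(raw, &out); err != nil {
		return nil, err
	}
	return out, nil
}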

246 cmd/admin-handlers-idp-openid.go (Normal file)
@@ -0,0 +1,246 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"encoding/json"
	"errors"
	"net/http"
	"sort"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/pkg/v3/policy"
)

const dummyRoleARN = "dummy-internal"

// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Get current object layer instance.
	objectAPI := newObjectLayerFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
	if s3Err != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	if !globalIAMSys.OpenIDConfig.Enabled {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL)
		return
	}

	userList := r.Form["users"]
	isAll := r.Form.Get("all") == "true"
	selfOnly := !isAll && len(userList) == 0
	cfgName := r.Form.Get("configName")
	allConfigs := r.Form.Get("allConfigs") == "true"
	if cfgName == "" && !allConfigs {
		cfgName = madmin.Default
	}

	if isAll && len(userList) > 0 {
		// This should be checked on client side, so return generic error
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	// Empty DN list and not self, list access keys for all users
	if isAll {
		if !globalIAMSys.IsAllowed(policy.Args{
			AccountName:     cred.AccessKey,
			Groups:          cred.Groups,
			Action:          policy.ListUsersAdminAction,
			ConditionValues: getConditionValues(r, "", cred),
			IsOwner:         owner,
			Claims:          cred.Claims,
		}) {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
			return
		}
	} else if len(userList) == 1 && userList[0] == cred.ParentUser {
		selfOnly = true
	}

	if !globalIAMSys.IsAllowed(policy.Args{
		AccountName:     cred.AccessKey,
		Groups:          cred.Groups,
		Action:          policy.ListServiceAccountsAdminAction,
		ConditionValues: getConditionValues(r, "", cred),
		IsOwner:         owner,
		Claims:          cred.Claims,
		DenyOnly:        selfOnly,
	}) {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	if selfOnly && len(userList) == 0 {
		selfDN := cred.AccessKey
		if cred.ParentUser != "" {
			selfDN = cred.ParentUser
		}
		userList = append(userList, selfDN)
	}

	listType := r.Form.Get("listType")
	var listSTSKeys, listServiceAccounts bool
	switch listType {
	case madmin.AccessKeyListUsersOnly:
		listSTSKeys = false
		listServiceAccounts = false
	case madmin.AccessKeyListSTSOnly:
		listSTSKeys = true
		listServiceAccounts = false
	case madmin.AccessKeyListSvcaccOnly:
		listSTSKeys = false
		listServiceAccounts = true
	case madmin.AccessKeyListAll:
		listSTSKeys = true
		listServiceAccounts = true
	default:
		err := errors.New("invalid list type")
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
		return
	}

	s := globalServerConfig.Clone()
	roleArnMap := make(map[string]string)
	// Map of configs to a map of users to their access keys
	cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys)
	configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	for _, config := range configs {
		if !allConfigs && cfgName != config.Name {
			continue
		}
		arn := dummyRoleARN
		if config.RoleARN != "" {
			arn = config.RoleARN
		}
		roleArnMap[arn] = config.Name
		newResp := make(map[string]madmin.OpenIDUserAccessKeys)
		cfgToUsersMap[config.Name] = newResp
	}
	if len(roleArnMap) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
		return
	}

	userSet := set.CreateStringSet(userList...)
	accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	for _, accessKey := range accessKeys {
		// Filter out any disqualifying access keys
		_, ok := accessKey.Claims[subClaim]
		if !ok {
			continue // OpenID access keys must have a sub claim
		}
		if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) {
			continue // skip if not the type we want
		}
		arn, ok := accessKey.Claims[roleArnClaim].(string)
		if !ok {
			if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
				continue // skip if no roleArn and no policy claim
			}
		}
		matchingCfgName, ok := roleArnMap[arn]
		if !ok {
			continue // skip if not part of the target config
		}
		var id string
		if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" {
			id, _ = accessKey.Claims[idClaim].(string)
		}
		if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) {
			continue // skip if not in the user list
		}
		openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser]

		// Add new user to map if not already present
		if !ok {
			var readableClaim string
			if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" {
				readableClaim, _ = accessKey.Claims[rc].(string)
			}
			openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{
				MinioAccessKey: accessKey.ParentUser,
				ID:             id,
				ReadableName:   readableClaim,
			}
		}
		svcAccInfo := madmin.ServiceAccountInfo{
			AccessKey:  accessKey.AccessKey,
			Expiration: &accessKey.Expiration,
		}
		if accessKey.IsServiceAccount() {
			openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo)
		} else {
			openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo)
		}
		cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys
	}

	// Convert map to slice and sort
	resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap))
	for cfgName, usersMap := range cfgToUsersMap {
		users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap))
		for _, user := range usersMap {
			users = append(users, user)
		}
		sort.Slice(users, func(i, j int) bool {
			return users[i].MinioAccessKey < users[j].MinioAccessKey
		})
		resp = append(resp, madmin.ListAccessKeysOpenIDResp{
			ConfigName: cfgName,
			Users:      users,
		})
	}
	sort.Slice(resp, func(i, j int) bool {
		return resp[i].ConfigName < resp[j].ConfigName
	})

	data, err := json.Marshal(resp)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, encryptedData)
}
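
One detail of the grouping above: each provider config is keyed by its role ARN, and a provider configured without a RoleARN (claim-based authorization) is keyed under the internal placeholder dummyRoleARN rather than an empty string. A minimal sketch of that rule, using a stand-in struct for the config entries GetConfigList returns (only the two fields referenced above are modeled):

// Sketch: the role-ARN grouping rule from ListAccessKeysOpenIDBulk.
type providerCfg struct {
	Name    string // provider config name, e.g. "_" for the default
	RoleARN string // empty for claim-based configs
}

func roleKey(c providerCfg) string {
	if c.RoleARN == "" {
		return dummyRoleARN // claim-based config: no role ARN to match on
	}
	return c.RoleARN
}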
@@ -26,10 +26,9 @@ import (
	"strconv"
	"strings"

	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/env"
	"github.com/minio/pkg/v2/policy"
	"github.com/minio/pkg/v3/env"
	"github.com/minio/pkg/v3/policy"
)

var (
@@ -210,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
		return
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
	adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}

func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
@@ -243,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
		poolsStatus[idx] = status
	}

	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
	adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}

func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
@@ -259,8 +258,8 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
	// concurrent rebalance-start commands.
	if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
			if proxyEp.Host == ep.Host {
				if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
					return
				}
			}
@@ -330,8 +329,8 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
	// pools may temporarily have out of date info on the others.
	if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
		for nodeIdx, proxyEp := range globalProxyEndpoints {
			if proxyEp.Endpoint.Host == ep.Host {
				if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
			if proxyEp.Host == ep.Host {
				if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
					return
				}
			}
@@ -350,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
			return
		}
		logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
		adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
	adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}

func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
@@ -374,7 +373,8 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
	// Cancel any ongoing rebalance operation
	globalNotificationSys.StopRebalance(r.Context())
	writeSuccessResponseHeadersOnly(w)
	logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
	adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
	globalNotificationSys.LoadRebalanceMeta(ctx, false)
}

func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
@@ -383,8 +383,8 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
		return
	}
	for nodeIdx, proxyEp := range globalProxyEndpoints {
		if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
			if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
		if proxyEp.Host == host && !proxyEp.IsLocal {
			if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
				return true
			}
		}

@@ -32,9 +32,8 @@ import (
	"github.com/dustin/go-humanize"
	"github.com/minio/madmin-go/v3"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/policy"
	"github.com/minio/pkg/v3/policy"
)

// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
@@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
	opts := getSRAddOptions(r)
	status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
	}

	if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
		globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
		err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
		err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
	}
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
	if encryptionKey != "" {
		data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
		if err != nil {
			logger.LogIf(ctx, err)
			return SRError{
				Cause: err,
				Code:  ErrSiteReplicationInvalidRequest,
@@ -349,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
	var replicateILMExpiry bool
	for _, site := range info.Sites {
		if site.ReplicateILMExpiry {
			replicateILMExpiry = true
			break
		}
	}
	if !replicateILMExpiry {
		// explicitly send nil for ILMExpiryStats
		info.ILMExpiryStats = nil
	}

	if err = json.NewEncoder(w).Encode(info); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -396,7 +406,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
	opts := getSREditOptions(r)
	status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -433,7 +443,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
	}

	if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -456,7 +466,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
		return
	}
	if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -493,7 +503,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
	}
	status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
	if err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -524,7 +534,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
	}

	if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
		logger.LogIf(ctx, err)
		adminLogIf(ctx, err)
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
@@ -586,7 +596,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.
		// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
		// would mean the network is not stable. Logging here will help in debugging network issues.
		if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
			logger.LogIf(ctx, err)
			adminLogIf(ctx, err)
		}
	}
	if err != nil {
@@ -609,5 +619,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.
		duration = globalNetPerfMinDuration
	}
	result := siteNetperf(r.Context(), duration)
	logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
	adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}

@@ -32,7 +32,7 @@ import (

	"github.com/minio/madmin-go/v3"
	minio "github.com/minio/minio-go/v7"
	"github.com/minio/pkg/v2/sync/errgroup"
	"github.com/minio/pkg/v3/sync/errgroup"
)

func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
@@ -120,9 +120,12 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
	userReq := madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	}
	if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
		c.Fatalf("Unable to attach policy: %v", err)
	}

	accessKeys[i] = accessKey

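Every test change in this region applies the same migration: the deprecated madmin-go SetPolicy call becomes an explicit policy association. Condensed from the hunks above and below (adm stands in for the suite's admin client):

// Before: a boolean flag selected user vs. group.
err = adm.SetPolicy(ctx, policyName, accessKey, false)

// After: the association is spelled out; AttachPolicy also returns a
// response describing the updated association, ignored in these tests.
_, err = adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
	Policies: []string{policyName},
	User:     accessKey, // or Group: groupName when mapping a group
})
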
File diff suppressed because it is too large
@@ -28,6 +28,7 @@ import (
	"net/http"
	"net/url"
	"runtime"
	"slices"
	"strings"
	"testing"
	"time"
@@ -39,7 +40,7 @@ import (
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/minio-go/v7/pkg/signer"
	"github.com/minio/minio/internal/auth"
	"github.com/minio/pkg/v2/env"
	"github.com/minio/pkg/v3/env"
)

const (
@@ -159,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}

func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
	s.TestSuiteCommon.RestartTestServer(c)
	s.RestartTestServer(c)

	s.iamSetup(c)
}
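
The one-line RestartIAMSuite change relies on Go's method promotion: TestSuiteIAM embeds TestSuiteCommon, so the embedded type's RestartTestServer is callable without naming the field. A minimal illustration of the rule, with hypothetical types:

type common struct{}

func (common) restart() {}

type suite struct{ common }

func demo() {
	var s suite
	s.common.restart() // explicit form, as before the change
	s.restart()        // promoted form, as after the change
}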
@@ -239,9 +240,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
	c.Assert(v.Status, madmin.AccountEnabled)

	// 3. Associate policy and check that user can access
	err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}

	client := s.getUserClient(c, accessKey, secretKey, "")
@@ -334,23 +338,34 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
	}
	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}
	// 2.3 check user has access to bucket
	c.mustListObjects(ctx, uClient, bucket)
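
The recurring policy rewrite in these tests is not cosmetic: s3:ListBucket is evaluated against the bucket ARN (arn:aws:s3:::bucket), while s3:GetObject and s3:PutObject are evaluated against object ARNs (arn:aws:s3:::bucket/*), so a single statement listing all three actions against one resource can never satisfy all of them. A condensed shape of the corrected policy (the tests keep the extra object actions in the first statement, which is harmless):

policyBytes := []byte(fmt.Sprintf(`{
 "Version": "2012-10-17",
 "Statement": [
  {"Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": ["arn:aws:s3:::%s"]},
  {"Effect": "Allow", "Action": ["s3:GetObject", "s3:PutObject"], "Resource": ["arn:aws:s3:::%s/*"]}
 ]
}`, bucket, bucket))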
@@ -436,7 +451,7 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::testbucket/*"
    "arn:aws:s3:::testbucket"
   ]
  }
 ]
@@ -470,9 +485,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
	c.mustNotListObjects(ctx, uClient, "testbucket")

	// 3.2 associate policy to user
	err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy1},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}

	admClnt := s.getAdminClient(c, accessKey, secretKey, "")
@@ -490,10 +508,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
		c.Fatalf("policy was missing!")
	}

	// 3.2 associate policy to user
	err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
	// Detach policy1 to set up for policy2
	_, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy1},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to detach policy: %v", err)
	}

	// 3.2 associate policy to user
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy2},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("unable to attach policy: %v", err)
	}

	// 3.3 check user can create service account implicitly.
@@ -538,16 +568,24 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
@@ -571,9 +609,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
	c.mustNotListObjects(ctx, uClient, bucket)

	// 3.2 associate policy to user
	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}
	// 3.3 check user has access to bucket
	c.mustListObjects(ctx, uClient, bucket)
@@ -645,16 +686,24 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))

	// Check that default policies can be overwritten.
	err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -667,6 +716,12 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
		c.Fatalf("policy info err: %v", err)
	}

	// Check that policy with comma is rejected.
	err = s.adm.AddCannedPolicy(ctx, "invalid,policy", policyBytes)
	if err == nil {
		c.Fatalf("invalid policy created successfully")
	}

	infoStr := string(info)
	if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") {
		c.Fatalf("policy contains unexpected content!")
@@ -690,16 +745,24 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
@@ -726,9 +789,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
	c.mustNotListObjects(ctx, uClient, bucket)

	// 3. Associate policy to group and check user got access.
	err = s.adm.SetPolicy(ctx, policy, group, true)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		Group:    group,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}
	// 3.1 check user has access to bucket
	c.mustListObjects(ctx, uClient, bucket)
@@ -743,8 +809,9 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
	if err != nil {
		c.Fatalf("group list err: %v", err)
	}
	if !set.CreateStringSet(groups...).Contains(group) {
		c.Fatalf("created group not present!")
	expected := []string{group}
	if !slices.Equal(groups, expected) {
		c.Fatalf("expected group listing: %v, got: %v", expected, groups)
	}
	groupInfo, err := s.adm.GetGroupDescription(ctx, group)
	if err != nil {
@@ -850,16 +917,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
@@ -871,9 +946,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}

	// Create an madmin client with user creds
@@ -931,16 +1009,24 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
@@ -952,9 +1038,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}

	// Create an madmin client with user creds
@@ -1010,16 +1099,24 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject",
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s"
   ]
  },
  {
   "Effect": "Allow",
   "Action": [
    "s3:PutObject",
    "s3:GetObject"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
   ]
  }
 ]
}`, bucket))
}`, bucket, bucket))
	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
	if err != nil {
		c.Fatalf("policy add error: %v", err)
@@ -1031,9 +1128,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
		c.Fatalf("Unable to set user: %v", err)
	}

	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{policy},
		User:     accessKey,
	})
	if err != nil {
		c.Fatalf("Unable to set policy: %v", err)
		c.Fatalf("unable to attach policy: %v", err)
	}

	// 1. Create a service account for the user
@@ -1564,7 +1664,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    "s3:ListBucket"
   ],
   "Resource": [
    "arn:aws:s3:::%s/*"
    "arn:aws:s3:::%s"
   ]
  }
 ]

File diff suppressed because it is too large
@@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}

func TestAdminServerInfo(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()

	adminTestBed, err := prepareAdminErasureTestBed(ctx)
@@ -463,6 +463,7 @@ func TestTopLockEntries(t *testing.T) {
			Owner:     lri.Owner,
			ID:        lri.UID,
			Quorum:    lri.Quorum,
			Timestamp: time.Unix(0, lri.Timestamp),
		})
	}

@@ -63,8 +63,8 @@ const (
)

var (
	errHealIdleTimeout   = fmt.Errorf("healing results were not consumed for too long")
	errHealStopSignalled = fmt.Errorf("heal stop signaled")
	errHealIdleTimeout   = errors.New("healing results were not consumed for too long")
	errHealStopSignalled = errors.New("heal stop signaled")

	errFnHealFromAPIErr = func(ctx context.Context, err error) error {
		apiErr := toAdminAPIErr(ctx, err)
@@ -260,7 +260,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
	} else {
		clientToken := he.clientToken
		if globalIsDistErasure {
			clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
			clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
		}

		hsp = madmin.HealStopSuccess{
@@ -329,12 +329,16 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
	// Add heal state and start sequence
	ahs.healSeqMap[hpath] = h

	// Launch top-level background heal go-routine
	go h.healSequenceStart(objAPI)

	clientToken := h.clientToken
	if globalIsDistErasure {
		clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
		clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
	}

	if h.clientToken == bgHealingUUID {
		// For background heal do nothing, do not spawn an unnecessary goroutine.
	} else {
		// Launch top-level background heal go-routine
		go h.healSequenceStart(objAPI)
	}

	b, err := json.Marshal(madmin.HealStartSuccess{
@@ -343,7 +347,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
		StartTime: h.startTime,
	})
	if err != nil {
		logger.LogIf(h.ctx, err)
		bugLogIf(h.ctx, err)
		return nil, toAdminAPIErr(h.ctx, err), ""
	}
	return b, noError, ""
@@ -390,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
	if err != nil {
		h.currentStatus.Items = nil

		logger.LogIf(h.ctx, err)
		bugLogIf(h.ctx, err)
		return nil, ErrInternalError
	}

@@ -451,8 +455,8 @@ type healSequence struct {
	// Number of total items healed against item type
	healedItemsMap map[madmin.HealItemType]int64

	// Number of total items where healing failed against endpoint and drive state
	healFailedItemsMap map[string]int64
	// Number of total items where healing failed against item type
	healFailedItemsMap map[madmin.HealItemType]int64

	// The time of the last scan/heal activity
	lastHealActivity time.Time
@@ -493,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
		ctx:                ctx,
		scannedItemsMap:    make(map[madmin.HealItemType]int64),
		healedItemsMap:     make(map[madmin.HealItemType]int64),
		healFailedItemsMap: make(map[string]int64),
		healFailedItemsMap: make(map[madmin.HealItemType]int64),
	}
}

@@ -537,14 +541,14 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
	return retMap
}

// gethealFailedItemsMap - returns map of all items where heal failed against
// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
	h.mutex.RLock()
	defer h.mutex.RUnlock()

	// Make a copy before returning the value
	retMap := make(map[string]int64, len(h.healFailedItemsMap))
	retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
	for k, v := range h.healFailedItemsMap {
		retMap[k] = v
	}
@@ -552,6 +556,30 @@ func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
	return retMap
}

func (h *healSequence) countFailed(healType madmin.HealItemType) {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	h.healFailedItemsMap[healType]++
	h.lastHealActivity = UTCNow()
}

func (h *healSequence) countScanned(healType madmin.HealItemType) {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	h.scannedItemsMap[healType]++
	h.lastHealActivity = UTCNow()
}

func (h *healSequence) countHealed(healType madmin.HealItemType) {
	h.mutex.Lock()
	defer h.mutex.Unlock()

	h.healedItemsMap[healType]++
	h.lastHealActivity = UTCNow()
}

// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
@@ -704,10 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
		task.opts.ScanMode = madmin.HealNormalScan
	}

	h.mutex.Lock()
	h.scannedItemsMap[healType]++
	h.lastHealActivity = UTCNow()
	h.mutex.Unlock()
	h.countScanned(healType)

	if source.noWait {
		select {
@@ -736,42 +761,40 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
		return nil
	}

	countOKDrives := func(drives []madmin.HealDriveInfo) (count int) {
		for _, drive := range drives {
			if drive.State == madmin.DriveStateOk {
				count++
			}
		}
		return count
	}

	// task queued, now wait for the response.
	select {
	case res := <-task.respCh:
		if res.err == nil {
			h.countHealed(healType)
		} else {
			h.countFailed(healType)
		}
		if !h.reportProgress {
			if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
				return nil
			}

			h.mutex.Lock()
			defer h.mutex.Unlock()

			// Progress is not reported in case of background heal processing.
			// Instead we increment relevant counter based on the heal result
			// for prometheus reporting.
			if res.err != nil {
				for _, d := range res.result.After.Drives {
					// For failed items we report the endpoint and drive state
					// This will help users take corrective actions for drives
					h.healFailedItemsMap[d.Endpoint+","+d.State]++
				}
			} else {
				// Only object type reported for successful healing
				h.healedItemsMap[res.result.Type]++
			}

			// Report caller of any failure
			return res.err
		}
		res.result.Type = healType
		if res.err != nil {
			// Only report object error
			if healType != madmin.HealItemObject {
				return res.err
			}
			res.result.Detail = res.err.Error()
		}
		if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks {
			if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks {
				res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got)
			}
		}
		return h.pushHealResultItem(res.result)
	case <-h.ctx.Done():
		return nil
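
queueHealTask also gains a post-heal sanity annotation: for erasure-coded results (DataBlocks > ParityBlocks > 0) it counts the drives reporting DriveStateOk after healing and, when that count falls below the parity count, stamps a quorum-loss message into the result's Detail field. A small worked instance with illustrative values:

// Illustrative: an EC object with ParityBlocks = 4 whose post-heal
// drive states leave only 2 drives OK trips the new check.
drives := []madmin.HealDriveInfo{
	{State: madmin.DriveStateOk},
	{State: madmin.DriveStateOk},
	{State: madmin.DriveStateOffline},
	{State: madmin.DriveStateCorrupt},
}
ok := 0
for _, d := range drives {
	if d.State == madmin.DriveStateOk {
		ok++
	}
}
parityBlocks := 4
if ok < parityBlocks {
	// yields: "quorum loss - expected 4 minimum, got drive states in OK 2"
	fmt.Printf("quorum loss - expected %d minimum, got drive states in OK %d\n", parityBlocks, ok)
}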
@@ -783,18 +806,20 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error {
 	return h.healMinioSysMeta(objAPI, minioConfigPrefix)()
 }
 
-func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
+func (h *healSequence) healItems(objAPI ObjectLayer) error {
 	if h.clientToken == bgHealingUUID {
 		// For background heal do nothing.
 		return nil
 	}
 
-	if err := h.healDiskMeta(objAPI); err != nil {
-		return err
+	if h.bucket == "" { // heal internal meta only during a site-wide heal
+		if err := h.healDiskMeta(objAPI); err != nil {
+			return err
+		}
 	}
 
 	// Heal buckets and objects
-	return h.healBuckets(objAPI, bucketsOnly)
+	return h.healBuckets(objAPI)
 }
 
 // traverseAndHeal - traverses on-disk data and performs healing
@@ -805,8 +830,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error {
 // has to wait until a safe point is reached, such as between scanning
 // two objects.
 func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) {
-	bucketsOnly := false // Heals buckets and objects also.
-	h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly)
+	h.traverseAndHealDoneCh <- h.healItems(objAPI)
 	xioutil.SafeClose(h.traverseAndHealDoneCh)
 }
 
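The gating rule introduced above, healing internal .minio.sys metadata only when no bucket was requested, is easy to illustrate on its own. A minimal sketch, with an illustrative sequence type rather than the real healSequence:

package main

import "fmt"

type sequence struct {
	bucket string // empty means a site-wide heal was requested
}

func (s sequence) healItems() {
	if s.bucket == "" { // site-wide heal: include internal metadata
		fmt.Println("healing .minio.sys metadata")
	}
	fmt.Println("healing buckets and objects")
}

func main() {
	sequence{}.healItems()                 // both steps run
	sequence{bucket: "photos"}.healItems() // metadata step skipped
}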

@@ -817,6 +841,7 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
 	// NOTE: Healing on meta is run regardless
 	// of any bucket being selected, this is to ensure that
 	// meta are always upto date and correct.
+	h.settings.Recursive = true
 	return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error {
 		if h.isQuitting() {
 			return errHealStopSignalled
@@ -833,14 +858,14 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
 }
 
 // healBuckets - check for all buckets heal or just particular bucket.
-func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
+func (h *healSequence) healBuckets(objAPI ObjectLayer) error {
 	if h.isQuitting() {
 		return errHealStopSignalled
 	}
 
 	// 1. If a bucket was specified, heal only the bucket.
 	if h.bucket != "" {
-		return h.healBucket(objAPI, h.bucket, bucketsOnly)
+		return h.healBucket(objAPI, h.bucket, false)
 	}
 
 	buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
@@ -854,7 +879,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
 	})
 
 	for _, bucket := range buckets {
-		if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil {
+		if err = h.healBucket(objAPI, bucket.Name, false); err != nil {
 			return err
 		}
 	}
@@ -872,16 +897,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly
 		return nil
 	}
 
-	if !h.settings.Recursive {
-		if h.object != "" {
-			if err := h.healObject(bucket, h.object, "", h.settings.ScanMode); err != nil {
-				return err
-			}
-		}
-
-		return nil
-	}
-
 	if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
 		return errFnHealFromAPIErr(h.ctx, err)
 	}
 
@@ -159,14 +159,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 
 		// Info operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag))
-		adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag))
+		adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag))
 
 		// StorageInfo operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag))
 		// DataUsageInfo operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag))
 		// Metrics operation
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag))
 
 		if globalIsDistErasure || globalIsErasure {
 			// Heal operations
@@ -193,9 +193,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		// Profiling operations - deprecated API
 		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)).
 			Queries("profilerType", "{profilerType:.*}")
-		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag))
 		// Profiling operations
-		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag))
+		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag))
 
 		// Config KV operations.
 		if enableConfigOps {
@@ -244,6 +244,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		// STS accounts ops
 		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}")
 
+		// Access key (service account/STS) operations
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}")
+
 		// Info policy IAM latest
 		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
 		// List policies latest
@@ -290,6 +294,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 
 		// Import IAM info
 		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
+		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))
 
 		// IDentity Provider configuration APIs
 		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
@@ -301,12 +306,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		// LDAP specific service accounts ops
 		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
 		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
-			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
-			Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
+			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
+			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")
 
 		// LDAP IAM operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))
+
+		// OpenID specific service accounts ops
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk").
+			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}")
 
 		// -- END IAM APIs --
 
 		// GetBucketQuotaConfig
@@ -340,6 +351,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
 			adminMiddleware(adminAPI.ListBatchJobs))
 
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc(
+			adminMiddleware(adminAPI.BatchJobStatus))
+
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
 			adminMiddleware(adminAPI.DescribeBatchJob))
 		adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(
@@ -416,6 +430,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		// -- Health API --
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
 			HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
+
+		// STS Revocation
+		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens))
 	}
 
 	// If none of the routes match add default error handler routes
 
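Several routes above change only the flags passed to adminMiddleware, for example traceAllFlag becoming traceHdrsS3HFlag. A sketch of how such variadic flags can toggle wrapper behavior; the flag names and middleware body here are illustrative, not the repository's actual implementation:

package sketch

import (
	"fmt"
	"net/http"
)

type hFlag int

const (
	traceAll   hFlag = 1 << iota // trace request and response bodies
	traceHdrs                    // trace headers only
	noObjLayer                   // skip the object-layer readiness check
)

func has(flags []hFlag, f hFlag) bool {
	for _, v := range flags {
		if v&f != 0 {
			return true
		}
	}
	return false
}

// middleware wraps a handler according to the flags it was registered
// with, in the spirit of adminMiddleware above (illustrative only).
func middleware(h http.HandlerFunc, flags ...hFlag) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if has(flags, traceHdrs) {
			fmt.Println("tracing headers only")
		} else if has(flags, traceAll) {
			fmt.Println("tracing full request body")
		}
		h(w, r)
	}
}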
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -18,7 +18,6 @@
 package cmd
 
 import (
-	"context"
 	"math"
 	"net/http"
 	"os"
@@ -31,7 +30,7 @@ import (
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio/internal/config"
 	"github.com/minio/minio/internal/kms"
-	"github.com/minio/minio/internal/logger"
+	xnet "github.com/minio/pkg/v3/net"
 )
 
 // getLocalServerProperty - returns madmin.ServerProperties for only the
@@ -65,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 			if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
 				network[nodeName] = string(madmin.ItemOnline)
 			} else {
-				network[nodeName] = string(madmin.ItemOffline)
-				// log once the error
-				logger.LogOnceIf(context.Background(), err, nodeName)
+				if xnet.IsNetworkOrHostDown(err, false) {
+					network[nodeName] = string(madmin.ItemOffline)
+				} else if xnet.IsNetworkOrHostDown(err, true) {
+					network[nodeName] = "connection attempt timedout"
+				}
 			}
 		}
 	}
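The new branch distinguishes a host that is down from a connection attempt that merely timed out by calling xnet.IsNetworkOrHostDown twice, first strictly and then tolerating timeouts. A reduced sketch of that classification, assuming only the IsNetworkOrHostDown(err, expectTimeouts) helper that the diff itself imports:

package sketch

import (
	xnet "github.com/minio/pkg/v3/net"
)

// classifyNode mirrors the branch above: a plain network/host-down error
// marks the node offline, while the timeout-tolerant variant catches
// connection attempts that timed out. The real code stores these strings
// into the per-node network map.
func classifyNode(err error) string {
	if err == nil {
		return "online"
	}
	if xnet.IsNetworkOrHostDown(err, false) {
		return "offline"
	}
	if xnet.IsNetworkOrHostDown(err, true) {
		// the strict check failed but the timeout-tolerant one matched,
		// so the failure was a timed-out connection attempt
		return "connection attempt timedout"
	}
	return "unknown"
}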

@@ -32,6 +32,8 @@ type DeletedObject struct {
 	DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
 	// MinIO extensions to support delete marker replication
 	ReplicationState ReplicationState `xml:"-"`
+
+	found bool // the object was found during deletion
 }
 
 // DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
@@ -42,10 +44,10 @@ type DeleteMarkerMTime struct {
 // MarshalXML encodes expiration date if it is non-zero and encodes
 // empty string otherwise
 func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
-	if t.Time.IsZero() {
+	if t.IsZero() {
 		return nil
 	}
-	return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
+	return e.EncodeElement(t.Format(time.RFC3339), startElement)
 }
 
 // ObjectV object version key/versionId
@@ -67,7 +69,7 @@ type ObjectToDelete struct {
 	ReplicateDecisionStr string `xml:"-"`
 }
 
-// createBucketConfiguration container for bucket configuration request from client.
+// createBucketLocationConfiguration container for bucket configuration request from client.
 // Used for parsing the location from the request body for Makebucket.
 type createBucketLocationConfiguration struct {
 	XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
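The MarshalXML change above relies on method promotion: with time.Time embedded, t.IsZero() and t.Format(...) reach the embedded value directly. A self-contained sketch of the same omit-when-zero pattern, using hypothetical MTime and record types:

package main

import (
	"encoding/xml"
	"fmt"
	"os"
	"time"
)

// MTime embeds time.Time, so its methods are promoted.
type MTime struct {
	time.Time
}

// MarshalXML encodes nothing at all for the zero value, which makes the
// element disappear from the output instead of rendering a zero date.
func (t MTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if t.IsZero() { // promoted from the embedded time.Time
		return nil
	}
	return e.EncodeElement(t.Format(time.RFC3339), start)
}

type record struct {
	XMLName xml.Name `xml:"Record"`
	Deleted MTime    `xml:"DeletedAt"`
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	_ = enc.Encode(record{}) // <Record></Record>, DeletedAt omitted
	fmt.Println()
	_ = enc.Encode(record{Deleted: MTime{time.Now()}}) // includes <DeletedAt>
}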
@@ -28,7 +28,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/minio/minio/internal/ioutil"
 	"google.golang.org/api/googleapi"
 
@@ -48,7 +48,7 @@ import (
 	levent "github.com/minio/minio/internal/config/lambda/event"
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/hash"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 // APIError structure
@@ -56,19 +56,23 @@ type APIError struct {
 	Code           string
 	Description    string
 	HTTPStatusCode int
+	ObjectSize     string
+	RangeRequested string
 }
 
 // APIErrorResponse - error response format
 type APIErrorResponse struct {
-	XMLName    xml.Name `xml:"Error" json:"-"`
-	Code       string
-	Message    string
-	Key        string `xml:"Key,omitempty" json:"Key,omitempty"`
-	BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
-	Resource   string
-	Region     string `xml:"Region,omitempty" json:"Region,omitempty"`
-	RequestID  string `xml:"RequestId" json:"RequestId"`
-	HostID     string `xml:"HostId" json:"HostId"`
+	XMLName          xml.Name `xml:"Error" json:"-"`
+	Code             string
+	Message          string
+	Key              string `xml:"Key,omitempty" json:"Key,omitempty"`
+	BucketName       string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
+	Resource         string
+	Region           string `xml:"Region,omitempty" json:"Region,omitempty"`
+	RequestID        string `xml:"RequestId" json:"RequestId"`
+	HostID           string `xml:"HostId" json:"HostId"`
+	ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
+	RangeRequested   string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
 }
 
 // APIErrorCode type of error status.
@@ -209,6 +213,10 @@ const (
 	ErrPolicyAlreadyAttached
 	ErrPolicyNotAttached
 	ErrExcessData
+	ErrPolicyInvalidName
+	ErrNoTokenRevokeType
+	ErrAdminOpenIDNotEnabled
+	ErrAdminNoSuchAccessKey
 	// Add new error codes here.
 
 	// SSE-S3/SSE-KMS related API errors
@@ -263,6 +271,7 @@ const (
 	ErrInvalidResourceName
 	ErrInvalidLifecycleQueryParameter
 	ErrServerNotInitialized
+	ErrBucketMetadataNotInitialized
 	ErrRequestTimedout
 	ErrClientDisconnected
 	ErrTooManyRequests
@@ -278,9 +287,11 @@ const (
 	ErrMalformedJSON
 	ErrAdminNoSuchUser
 	ErrAdminNoSuchUserLDAPWarn
+	ErrAdminLDAPExpectedLoginName
 	ErrAdminNoSuchGroup
 	ErrAdminGroupNotEmpty
 	ErrAdminGroupDisabled
+	ErrAdminInvalidGroupName
 	ErrAdminNoSuchJob
 	ErrAdminNoSuchPolicy
 	ErrAdminPolicyChangeAlreadyApplied
@@ -300,6 +311,7 @@ const (
 	ErrAdminConfigIDPCfgNameDoesNotExist
 	ErrInsecureClientRequest
 	ErrObjectTampered
+	ErrAdminLDAPNotEnabled
 
 	// Site-Replication errors
 	ErrSiteReplicationInvalidRequest
@@ -418,6 +430,7 @@ const (
 	ErrAdminProfilerNotEnabled
 	ErrInvalidDecompressedSize
 	ErrAddUserInvalidArgument
+	ErrAddUserValidUTF
 	ErrAdminResourceInvalidArgument
 	ErrAdminAccountNotEligible
 	ErrAccountNotEligible
@@ -436,6 +449,8 @@ const (
 	ErrAdminNoAccessKey
 	ErrAdminNoSecretKey
 
+	ErrIAMNotInitialized
+
 	apiErrCodeEnd // This is used only for the testing code
 )
 
@@ -449,9 +464,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
 	if err != nil {
 		apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
 	}
-	if globalSite.Region != "" {
+	if region := globalSite.Region(); region != "" {
 		if errCode == ErrAuthorizationHeaderMalformed {
-			apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
+			apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
 			return apiErr
 		}
 	}
@@ -550,6 +565,16 @@ var errorCodes = errorCodeMap{
 		Description:    "More data provided than indicated content length",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrPolicyInvalidName: {
+		Code:           "PolicyInvalidName",
+		Description:    "Policy name may not contain comma",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
+	ErrAdminOpenIDNotEnabled: {
+		Code:           "OpenIDNotEnabled",
+		Description:    "No enabled OpenID Connect identity providers",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrPolicyTooLarge: {
 		Code:           "PolicyTooLarge",
 		Description:    "Policy exceeds the maximum allowed document size.",
@@ -612,7 +637,7 @@ var errorCodes = errorCodeMap{
 	},
 	ErrMissingContentMD5: {
 		Code:           "MissingContentMD5",
-		Description:    "Missing required header for this request: Content-Md5.",
+		Description:    "Missing or invalid required header for this request: Content-Md5 or Amz-Content-Checksum",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrMissingSecurityHeader: {
@@ -958,7 +983,7 @@ var errorCodes = errorCodeMap{
 	ErrReplicationRemoteConnectionError: {
 		Code:           "XMinioAdminReplicationRemoteConnectionError",
 		Description:    "Remote service connection error",
-		HTTPStatusCode: http.StatusNotFound,
+		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
 	ErrReplicationBandwidthLimitError: {
 		Code:           "XMinioAdminReplicationBandwidthLimitError",
@@ -967,7 +992,7 @@ var errorCodes = errorCodeMap{
 	},
 	ErrReplicationNoExistingObjects: {
 		Code:           "XMinioReplicationNoExistingObjects",
-		Description:    "No matching ExistingsObjects rule enabled",
+		Description:    "No matching ExistingObjects rule enabled",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	ErrRemoteTargetDenyAddError: {
@@ -1247,6 +1272,16 @@ var errorCodes = errorCodeMap{
 		Description:    "The security token included in the request is invalid",
 		HTTPStatusCode: http.StatusForbidden,
 	},
+	ErrNoTokenRevokeType: {
+		Code:           "InvalidArgument",
+		Description:    "No token revoke type specified and one could not be inferred from the request",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
+	ErrAdminNoSuchAccessKey: {
+		Code:           "XMinioAdminNoSuchAccessKey",
+		Description:    "The specified access key does not exist.",
+		HTTPStatusCode: http.StatusNotFound,
+	},
 
 	// S3 extensions.
 	ErrContentSHA256Mismatch: {
@@ -1293,7 +1328,17 @@ var errorCodes = errorCodeMap{
 	},
 	ErrServerNotInitialized: {
 		Code:           "XMinioServerNotInitialized",
-		Description:    "Server not initialized, please try again.",
+		Description:    "Server not initialized yet, please try again.",
 		HTTPStatusCode: http.StatusServiceUnavailable,
 	},
+	ErrIAMNotInitialized: {
+		Code:           "XMinioIAMNotInitialized",
+		Description:    "IAM sub-system not initialized yet, please try again.",
+		HTTPStatusCode: http.StatusServiceUnavailable,
+	},
+	ErrBucketMetadataNotInitialized: {
+		Code:           "XMinioBucketMetadataNotInitialized",
+		Description:    "Bucket metadata not initialized yet, please try again.",
+		HTTPStatusCode: http.StatusServiceUnavailable,
+	},
 	ErrMalformedJSON: {
@@ -1465,8 +1510,8 @@ var errorCodes = errorCodeMap{
 	},
 	ErrTooManyRequests: {
 		Code:           "TooManyRequests",
-		Description:    "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
-		HTTPStatusCode: http.StatusServiceUnavailable,
+		Description:    "Please reduce your request rate",
+		HTTPStatusCode: http.StatusTooManyRequests,
 	},
 	ErrUnsupportedMetadata: {
 		Code:           "InvalidArgument",
@@ -2079,7 +2124,26 @@ var errorCodes = errorCodeMap{
 		Description:    "Invalid attribute name specified.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
 	// Add your error structure here.
+	ErrAdminLDAPNotEnabled: {
+		Code:           "XMinioLDAPNotEnabled",
+		Description:    "LDAP is not enabled. LDAP must be enabled to make LDAP requests.",
+		HTTPStatusCode: http.StatusNotImplemented,
+	},
+	ErrAdminLDAPExpectedLoginName: {
+		Code:           "XMinioLDAPExpectedLoginName",
+		Description:    "Expected LDAP short username but was given full DN.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
+	ErrAdminInvalidGroupName: {
+		Code:           "XMinioInvalidGroupName",
+		Description:    "The group name is invalid.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
+	ErrAddUserValidUTF: {
+		Code:           "XMinioInvalidUTF",
+		Description:    "Invalid UTF-8 character detected.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 }
 
 // toAPIErrorCode - Converts embedded errors. Convenience
@@ -2115,10 +2179,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrAdminNoSuchUserLDAPWarn
 	case errNoSuchServiceAccount:
 		apiErr = ErrAdminServiceAccountNotFound
+	case errNoSuchAccessKey:
+		apiErr = ErrAdminNoSuchAccessKey
 	case errNoSuchGroup:
 		apiErr = ErrAdminNoSuchGroup
 	case errGroupNotEmpty:
 		apiErr = ErrAdminGroupNotEmpty
+	case errGroupNameContainsReservedChars:
+		apiErr = ErrAdminInvalidGroupName
 	case errNoSuchJob:
 		apiErr = ErrAdminNoSuchJob
 	case errNoPolicyToAttachOrDetach:
@@ -2133,6 +2201,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrEntityTooSmall
 	case errAuthentication:
 		apiErr = ErrAccessDenied
+	case auth.ErrContainsReservedChars:
+		apiErr = ErrAdminInvalidAccessKey
 	case auth.ErrInvalidAccessKeyLength:
 		apiErr = ErrAdminInvalidAccessKey
 	case auth.ErrInvalidSecretKeyLength:
@@ -2200,6 +2270,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrInvalidMaxParts
 	case ioutil.ErrOverread:
 		apiErr = ErrExcessData
+	case errServerNotInitialized:
+		apiErr = ErrServerNotInitialized
+	case errBucketMetadataNotInitialized:
+		apiErr = ErrBucketMetadataNotInitialized
+	case hash.ErrInvalidChecksum:
+		apiErr = ErrInvalidChecksum
 	}
 
 	// Compression errors
@@ -2391,10 +2467,9 @@ func toAPIError(ctx context.Context, err error) APIError {
 	apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
 	switch apiErr.Code {
 	case "NotImplemented":
-		desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
 		apiErr = APIError{
 			Code:           apiErr.Code,
-			Description:    desc,
+			Description:    fmt.Sprintf("%s (%v)", apiErr.Description, err),
 			HTTPStatusCode: apiErr.HTTPStatusCode,
 		}
 	case "XMinioBackendDown":
@@ -2406,12 +2481,24 @@ func toAPIError(ctx context.Context, err error) APIError {
 		switch e := err.(type) {
 		case kms.Error:
 			apiErr = APIError{
-				Description:    e.Err.Error(),
+				Description:    e.Err,
 				Code:           e.APICode,
-				HTTPStatusCode: e.HTTPStatusCode,
+				HTTPStatusCode: e.Code,
 			}
 		case batchReplicationJobError:
-			apiErr = APIError(e)
+			apiErr = APIError{
+				Description:    e.Description,
+				Code:           e.Code,
+				HTTPStatusCode: e.HTTPStatusCode,
+			}
+		case InvalidRange:
+			apiErr = APIError{
+				Code:           "InvalidRange",
+				Description:    e.Error(),
+				HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
+				ObjectSize:     strconv.FormatInt(e.ResourceSize, 10),
+				RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
+			}
 		case InvalidArgument:
 			apiErr = APIError{
 				Code:           "InvalidArgument",
@@ -2490,11 +2577,11 @@ func toAPIError(ctx context.Context, err error) APIError {
 			if len(e.Errors) >= 1 {
 				apiErr.Code = e.Errors[0].Reason
 			}
-		case azblob.StorageError:
+		case *azcore.ResponseError:
 			apiErr = APIError{
-				Code:           string(e.ServiceCode()),
+				Code:           e.ErrorCode,
 				Description:    e.Error(),
-				HTTPStatusCode: e.Response().StatusCode,
+				HTTPStatusCode: e.StatusCode,
 			}
 		// Add more other SDK related errors here if any in future.
 		default:
@@ -2519,7 +2606,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 			// Make sure to log the errors which we cannot translate
 			// to a meaningful S3 API errors. This is added to aid in
 			// debugging unexpected/unhandled errors.
-			logger.LogIf(ctx, err)
+			internalLogIf(ctx, err)
 		}
 
 	return apiErr
@@ -2533,18 +2620,20 @@ func getAPIError(code APIErrorCode) APIError {
 	return errorCodes.ToAPIErr(ErrInternalError)
 }
 
-// getErrorResponse gets in standard error and resource value and
+// getAPIErrorResponse gets in standard error and resource value and
 // provides a encodable populated response values
 func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
 	reqInfo := logger.GetReqInfo(ctx)
 	return APIErrorResponse{
-		Code:       err.Code,
-		Message:    err.Description,
-		BucketName: reqInfo.BucketName,
-		Key:        reqInfo.ObjectName,
-		Resource:   resource,
-		Region:     globalSite.Region,
-		RequestID:  requestID,
-		HostID:     hostID,
+		Code:             err.Code,
+		Message:          err.Description,
+		BucketName:       reqInfo.BucketName,
+		Key:              reqInfo.ObjectName,
+		Resource:         resource,
+		Region:           globalSite.Region(),
+		RequestID:        requestID,
+		HostID:           hostID,
+		ActualObjectSize: err.ObjectSize,
+		RangeRequested:   err.RangeRequested,
 	}
 }
 
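Throughout these hunks, reads of the globalSite.Region field become calls to a Region() accessor. One plausible motivation, and only an assumption here, is that an accessor can guard the value with a lock so configuration reloads stay race-free; a sketch of that shape with a hypothetical site type:

package main

import (
	"fmt"
	"sync"
)

// site is a hypothetical stand-in for the global site config: turning the
// Region field into an accessor lets the value be swapped under a lock
// when configuration is reloaded, which direct field reads cannot do safely.
type site struct {
	mu     sync.RWMutex
	region string
}

func (s *site) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.region
}

func (s *site) Update(region string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.region = region
}

func main() {
	var globalSite site
	globalSite.Update("us-east-1")
	fmt.Println(globalSite.Region())
}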
@@ -18,7 +18,6 @@
 package cmd
 
 import (
-	"context"
 	"errors"
 	"testing"
 
@@ -64,7 +63,7 @@ var toAPIErrorTests = []struct {
 }
 
 func TestAPIErrCode(t *testing.T) {
-	ctx := context.Background()
+	ctx := t.Context()
 	for i, testCase := range toAPIErrorTests {
 		errCode := toAPIErrorCode(ctx, testCase.err)
 		if errCode != testCase.errCode {
 
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
@@ -30,7 +31,6 @@ import (
 	"github.com/minio/minio-go/v7/pkg/tags"
 	"github.com/minio/minio/internal/crypto"
 	xhttp "github.com/minio/minio/internal/http"
-	"github.com/minio/minio/internal/logger"
 	xxml "github.com/minio/xxml"
 )
 
@@ -54,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {
 
 	// Set `x-amz-bucket-region` only if region is set on the server
 	// by default minio uses an empty region.
-	if region := globalSite.Region; region != "" {
+	if region := globalSite.Region(); region != "" {
 		w.Header().Set(xhttp.AmzBucketRegion, region)
 	}
 	w.Header().Set(xhttp.AcceptRanges, "bytes")
@@ -68,7 +68,7 @@ func encodeResponse(response interface{}) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xml.Header)
 	if err := xml.NewEncoder(&buf).Encode(response); err != nil {
-		logger.LogIf(GlobalContext, err)
+		bugLogIf(GlobalContext, err)
 		return nil
 	}
 	return buf.Bytes()
@@ -86,7 +86,7 @@ func encodeResponseList(response interface{}) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xxml.Header)
 	if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
-		logger.LogIf(GlobalContext, err)
+		bugLogIf(GlobalContext, err)
 		return nil
 	}
 	return buf.Bytes()
@@ -108,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
 }
 
 // Write object header
-func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
+func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
 	// set common headers
 	setCommonHeaders(w)
 
@@ -136,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 	// Set tag count if object has tags
 	if len(objInfo.UserTags) > 0 {
 		tags, _ := tags.ParseObjectTags(objInfo.UserTags)
-		if tags.Count() > 0 {
+		if tags != nil && tags.Count() > 0 {
 			w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
 			if opts.Tagging {
 				// This is MinIO only extension to return back tags along with the count.
@@ -213,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 	if objInfo.IsRemote() {
 		// Check if object is being restored. For more information on x-amz-restore header see
 		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
-		w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
+		w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
 	}
 
 	if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
 
@@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) {
 		e = char
 
 		// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
-		if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
+		isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')
+		if !isAlnum {
 			t.Fail()
 		}
 	}
 
@@ -35,7 +35,7 @@ import (
 	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 	xxml "github.com/minio/xxml"
 )
 
@@ -166,10 +166,11 @@ type Part struct {
 	Size int64
 
 	// Checksum values
-	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
-	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
-	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
-	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC32     string `xml:"ChecksumCRC32,omitempty"`
+	ChecksumCRC32C    string `xml:"ChecksumCRC32C,omitempty"`
+	ChecksumSHA1      string `xml:"ChecksumSHA1,omitempty"`
+	ChecksumSHA256    string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC64NVME string `xml:",omitempty"`
 }
 
 // ListPartsResponse - format for list parts response.
@@ -192,6 +193,8 @@ type ListPartsResponse struct {
 	IsTruncated bool
 
 	ChecksumAlgorithm string
+	ChecksumType      string
+
 	// List of parts.
 	Parts []Part `xml:"Part"`
 }
@@ -413,10 +416,11 @@ type CompleteMultipartUploadResponse struct {
 	Key  string
 	ETag string
 
-	ChecksumCRC32  string `xml:"ChecksumCRC32,omitempty"`
-	ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
-	ChecksumSHA1   string `xml:"ChecksumSHA1,omitempty"`
-	ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC32     string `xml:"ChecksumCRC32,omitempty"`
+	ChecksumCRC32C    string `xml:"ChecksumCRC32C,omitempty"`
+	ChecksumSHA1      string `xml:"ChecksumSHA1,omitempty"`
+	ChecksumSHA256    string `xml:"ChecksumSHA256,omitempty"`
+	ChecksumCRC64NVME string `xml:",omitempty"`
 }
 
 // DeleteError structure.
@@ -516,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
 		}
 	case crypto.SSEC:
 		m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
-
 	}
 
 	var toRemove []string
@@ -544,7 +547,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
 }
 
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
-func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
+func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
 	versions := make([]ObjectVersion, 0, len(resp.Objects))
 
 	owner := &Owner{
@@ -573,7 +576,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 		}
 		content.Size = object.Size
 		if object.StorageClass != "" {
-			content.StorageClass = object.StorageClass
+			content.StorageClass = filterStorageClass(ctx, object.StorageClass)
 		} else {
 			content.StorageClass = globalMinioDefaultStorageClass
 		}
@@ -593,8 +596,6 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 		for k, v := range cleanReservedKeys(object.UserDefined) {
 			content.UserMetadata.Set(k, v)
 		}
-
-		content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
 		content.Internal = &ObjectInternalInfo{
 			K: object.DataBlocks,
 			M: object.ParityBlocks,
@@ -634,7 +635,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 }
 
 // generates an ListObjectsV1 response for the said bucket with other enumerated options.
-func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
+func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
 	contents := make([]Object, 0, len(resp.Objects))
 	owner := &Owner{
 		ID: globalMinioDefaultOwnerID,
@@ -654,7 +655,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 		}
 		content.Size = object.Size
 		if object.StorageClass != "" {
-			content.StorageClass = object.StorageClass
+			content.StorageClass = filterStorageClass(ctx, object.StorageClass)
 		} else {
 			content.StorageClass = globalMinioDefaultStorageClass
 		}
@@ -683,7 +684,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 }
 
 // generates an ListObjectsV2 response for the said bucket with other enumerated options.
-func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
+func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
 	contents := make([]Object, 0, len(objects))
 	var owner *Owner
 	if fetchOwner {
@@ -707,7 +708,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 		}
 		content.Size = object.Size
 		if object.StorageClass != "" {
-			content.StorageClass = object.StorageClass
+			content.StorageClass = filterStorageClass(ctx, object.StorageClass)
 		} else {
 			content.StorageClass = globalMinioDefaultStorageClass
 		}
@@ -729,7 +730,6 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 		for k, v := range cleanReservedKeys(object.UserDefined) {
 			content.UserMetadata.Set(k, v)
 		}
-		content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat))
 		content.Internal = &ObjectInternalInfo{
 			K: object.DataBlocks,
 			M: object.ParityBlocks,
@@ -789,18 +789,19 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
 }
 
 // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
-func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
-	cs := oi.decryptChecksums(0)
+func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
+	cs, _ := oi.decryptChecksums(0, h)
 	c := CompleteMultipartUploadResponse{
 		Location: location,
 		Bucket:   bucket,
 		Key:      key,
 		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
-		ETag:           "\"" + oi.ETag + "\"",
-		ChecksumSHA1:   cs[hash.ChecksumSHA1.String()],
-		ChecksumSHA256: cs[hash.ChecksumSHA256.String()],
-		ChecksumCRC32:  cs[hash.ChecksumCRC32.String()],
-		ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()],
+		ETag:              "\"" + oi.ETag + "\"",
+		ChecksumSHA1:      cs[hash.ChecksumSHA1.String()],
+		ChecksumSHA256:    cs[hash.ChecksumSHA256.String()],
+		ChecksumCRC32:     cs[hash.ChecksumCRC32.String()],
+		ChecksumCRC32C:    cs[hash.ChecksumCRC32C.String()],
+		ChecksumCRC64NVME: cs[hash.ChecksumCRC64NVME.String()],
 	}
 	return c
 }
@@ -828,6 +829,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 	listPartsResponse.IsTruncated = partsInfo.IsTruncated
 	listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
 	listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm
+	listPartsResponse.ChecksumType = partsInfo.ChecksumType
 
 	listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
 	for index, part := range partsInfo.Parts {
@@ -840,6 +842,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 		newPart.ChecksumCRC32C = part.ChecksumCRC32C
 		newPart.ChecksumSHA1 = part.ChecksumSHA1
 		newPart.ChecksumSHA256 = part.ChecksumSHA256
+		newPart.ChecksumCRC64NVME = part.ChecksumCRC64NVME
 		listPartsResponse.Parts[index] = newPart
 	}
 	return listPartsResponse
@@ -891,7 +894,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
 	}
 	// Similar check to http.checkWriteHeaderCode
 	if statusCode < 100 || statusCode > 999 {
-		logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
+		bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
 		statusCode = http.StatusInternalServerError
 	}
 	setCommonHeaders(w)
@@ -946,22 +949,23 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
 
 // writeErrorResponse writes error headers
 func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
-	if err.HTTPStatusCode == http.StatusServiceUnavailable {
-		// Set retry-after header to indicate user-agents to retry request after 120secs.
+	switch err.HTTPStatusCode {
+	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
+		// Set retry-after header to indicate user-agents to retry request after 60 seconds.
 		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set(xhttp.RetryAfter, "120")
+		w.Header().Set(xhttp.RetryAfter, "60")
 	}
 
 	switch err.Code {
 	case "InvalidRegion":
-		err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
+		err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
 	case "AuthorizationHeaderMalformed":
-		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
+		err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
 	}
 
 	// Similar check to http.checkWriteHeaderCode
 	if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
-		logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
+		bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
 		err.HTTPStatusCode = http.StatusInternalServerError
 	}
 
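After the change above, both 503 and 429 responses advertise a Retry-After of 60 seconds so compliant clients back off instead of hammering the server. The header logic reduces to a few lines:

package sketch

import "net/http"

// writeThrottled shows the Retry-After rule in isolation: throttling and
// unavailability responses both carry the back-off hint before the status
// line is written.
func writeThrottled(w http.ResponseWriter, status int) {
	switch status {
	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
		w.Header().Set("Retry-After", "60")
	}
	w.WriteHeader(status)
}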

@@ -24,7 +24,7 @@ import (
 	consoleapi "github.com/minio/console/api"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/wildcard"
+	"github.com/minio/pkg/v3/wildcard"
 	"github.com/rs/cors"
 )
 
@@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
 	globalObjLayerMutex.Unlock()
 }
 
-// objectAPIHandler implements and provides http handlers for S3 API.
+// objectAPIHandlers implements and provides http handlers for S3 API.
 type objectAPIHandlers struct {
 	ObjectAPI func() ObjectLayer
 }
@@ -227,13 +227,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
 	}
 
 	// Skip wrapping with the gzip middleware if specified.
-	var gzippedHandler http.HandlerFunc = tracedHandler
+	gzippedHandler := tracedHandler
 	if !handlerFlags.has(noGZS3HFlag) {
 		gzippedHandler = gzipHandler(gzippedHandler)
 	}
 
	// Skip wrapping with throttling middleware if specified.
-	var throttledHandler http.HandlerFunc = gzippedHandler
+	throttledHandler := gzippedHandler
 	if !handlerFlags.has(noThrottleS3HFlag) {
 		throttledHandler = maxClients(throttledHandler)
 	}
@@ -436,7 +436,7 @@ func registerAPIRouter(router *mux.Router) {
 			Queries("notification", "")
 		// ListenNotification
 		router.Methods(http.MethodGet).
-			HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
+			HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
 			Queries("events", "{events:.*}")
 		// ResetBucketReplicationStatus - MinIO extension API
 		router.Methods(http.MethodGet).
@@ -456,6 +456,14 @@ func registerAPIRouter(router *mux.Router) {
 		router.Methods(http.MethodGet).
 			HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)).
 			Queries("cors", "")
+		// PutBucketCors - this is a dummy call.
+		router.Methods(http.MethodPut).
+			HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)).
+			Queries("cors", "")
+		// DeleteBucketCors - this is a dummy call.
+		router.Methods(http.MethodDelete).
+			HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)).
+			Queries("cors", "")
 		// GetBucketWebsiteHandler - this is a dummy call.
 		router.Methods(http.MethodGet).
 			HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)).
@@ -472,6 +480,7 @@ func registerAPIRouter(router *mux.Router) {
 		router.Methods(http.MethodGet).
 			HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)).
 			Queries("logging", "")
+
 		// GetBucketTaggingHandler
 		router.Methods(http.MethodGet).
 			HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)).
@@ -615,7 +624,7 @@ func registerAPIRouter(router *mux.Router) {
 
 	// ListenNotification
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).
-		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)).
+		HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)).
		Queries("events", "{events:.*}")
 
 	// ListBuckets
 
File diff suppressed because one or more lines are too long
@@ -41,7 +41,7 @@ import (
 	xjwt "github.com/minio/minio/internal/jwt"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/mcontext"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 // Verify if request has JWT.
@@ -96,7 +96,7 @@ func isRequestSignStreamingTrailerV4(r *http.Request) bool {
 // Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
 func isRequestUnsignedTrailerV4(r *http.Request) bool {
 	return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
-		r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding)
+		r.Method == http.MethodPut
 }
 
 // Authorization type.
@@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) {
 		var err error
 		r.Form, err = url.ParseQuery(r.URL.RawQuery)
 		if err != nil {
-			logger.LogIf(r.Context(), err)
+			authNLogIf(r.Context(), err)
 			return authTypeUnknown
 		}
 	}
@@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
 	s3Err := ErrAccessDenied
 	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
 		getRequestAuthType(r) == authTypeSigned {
-
 		// Get credential information from the request.
 		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
 		if s3Err != ErrNone {
@@ -178,7 +177,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
 
 	logger.GetReqInfo(ctx).Cred = cred
 	logger.GetReqInfo(ctx).Owner = owner
-	logger.GetReqInfo(ctx).Region = globalSite.Region
+	logger.GetReqInfo(ctx).Region = globalSite.Region()
 
 	return cred, owner, ErrNone
 }
@@ -222,7 +221,7 @@ func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
 	return claims
 }
 
-func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) {
+func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) {
 	// JWT token for x-amz-security-token is signed with admin
 	// secret key, temporary credentials become invalid if
 	// server admin credentials change. This is done to ensure
@@ -244,7 +243,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
 
 	// If AuthZPlugin is set, return without any further checks.
 	if newGlobalAuthZPluginFn() != nil {
-		return claims.Map(), nil
+		return claims, nil
 	}
 
 	// Check if a session policy is set. If so, decode it here.
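getClaimsFromTokenWithSecret now returns the typed *xjwt.MapClaims and pushes the map[string]interface{} conversion out to call sites such as getClaimsFromToken. A reduced sketch of that shape, with a hypothetical mapClaims type standing in for xjwt.MapClaims:

package main

import "fmt"

// mapClaims is a hypothetical stand-in for xjwt.MapClaims.
type mapClaims struct {
	m map[string]interface{}
}

// Map converts the typed claims to a plain map at the boundary.
func (c *mapClaims) Map() map[string]interface{} { return c.m }

// parse returns the typed claims; callers that need a plain map convert
// only where required, as getClaimsFromToken does after the change.
func parse(token string) (*mapClaims, error) {
	// token verification elided in this sketch
	return &mapClaims{m: map[string]interface{}{"sub": token}}, nil
}

func main() {
	claims, err := parse("alice")
	if err != nil {
		return
	}
	fmt.Println(claims.Map()["sub"]) // alice
}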
@@ -257,18 +256,22 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
 		if err != nil {
 			// Base64 decoding fails, we should log to indicate
 			// something is malforming the request sent by client.
-			logger.LogIf(GlobalContext, err, logger.ErrorKind)
+			authNLogIf(GlobalContext, err, logger.ErrorKind)
 			return nil, errAuthentication
 		}
 		claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
 	}
 
-	return claims.Map(), nil
+	return claims, nil
 }
 
 // Fetch claims in the security token returned by the client.
 func getClaimsFromToken(token string) (map[string]interface{}, error) {
-	return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
+	jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
+	if err != nil {
+		return nil, err
+	}
+	return jwtClaims.Map(), nil
 }
 
 // Fetch claims in the security token returned by the client and validate the token.
@@ -319,7 +322,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
 		if err != nil {
 			return nil, toAPIErrorCode(r.Context(), err)
 		}
-		return claims, ErrNone
+		return claims.Map(), ErrNone
 	}
 
 	claims := xjwt.NewMapClaims()
@@ -353,14 +356,14 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po
 
 func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
 	if logger.GetReqInfo(ctx) == nil {
-		logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
+		bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
 		return ErrAccessDenied
 	}
 
 	var cred auth.Credentials
 	var owner bool
 	switch getRequestAuthType(r) {
-	case authTypeUnknown, authTypeStreamingSigned:
+	case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
 		return ErrSignatureVersionNotSupported
 	case authTypePresignedV2, authTypeSignedV2:
 		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
@@ -368,7 +371,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
 		}
 		cred, owner, s3Err = getReqAccessKeyV2(r)
 	case authTypeSigned, authTypePresigned:
-		region := globalSite.Region
+		region := globalSite.Region()
 		switch action {
 		case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
 			region = ""
@@ -384,7 +387,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
 
 	logger.GetReqInfo(ctx).Cred = cred
 	logger.GetReqInfo(ctx).Owner = owner
-	logger.GetReqInfo(ctx).Region = globalSite.Region
+	logger.GetReqInfo(ctx).Region = globalSite.Region()
 
 	// region is valid only for CreateBucketAction.
 	var region string
@@ -392,7 +395,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
 		// To extract region from XML in request body, get copy of request body.
 		payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
 		if err != nil {
-			logger.LogIf(ctx, err, logger.ErrorKind)
+			authZLogIf(ctx, err, logger.ErrorKind)
 			return ErrMalformedXML
 		}
 
@@ -671,32 +674,6 @@ func setAuthMiddleware(h http.Handler) http.Handler {
 	})
 }
 
-func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
-	var cred auth.Credentials
-	var owner bool
-	var s3Err APIErrorCode
-	switch atype {
-	case authTypeUnknown, authTypeStreamingSigned:
-		return cred, owner, ErrSignatureVersionNotSupported
-	case authTypeSignedV2, authTypePresignedV2:
-		if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
-			return cred, owner, s3Err
-		}
-		cred, owner, s3Err = getReqAccessKeyV2(r)
-	case authTypePresigned, authTypeSigned:
-		region := globalSite.Region
-		if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
-			return cred, owner, s3Err
-		}
-		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
-	}
-	if s3Err != ErrNone {
-		return cred, owner, s3Err
-	}
-
-	return cred, owner, ErrNone
-}
-
 func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
 	var retSet bool
 	if cred.AccessKey == "" {
@@ -745,14 +722,20 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
 func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
 	var cred auth.Credentials
 	var owner bool
-	region := globalSite.Region
+	region := globalSite.Region()
 	switch atype {
 	case authTypeUnknown:
 		return ErrSignatureVersionNotSupported
 	case authTypeSignedV2, authTypePresignedV2:
 		cred, owner, s3Err = getReqAccessKeyV2(r)
-	case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
+	case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer:
 		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
+	case authTypeStreamingUnsignedTrailer:
+		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
+		if s3Err == ErrMissingFields {
+			// Could be anonymous. cred + owner is zero value.
+			s3Err = ErrNone
+		}
 	}
 	if s3Err != ErrNone {
 		return s3Err
 
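The new authTypeStreamingUnsignedTrailer branch above treats a credential lookup that fails with ErrMissingFields as an anonymous request rather than an error. A minimal sketch of that downgrade rule, with hypothetical error codes:

package main

import "fmt"

type apiErr int

const (
	errNone apiErr = iota
	errMissingFields
)

// credsForUnsignedTrailer maps a missing-auth-fields failure to success,
// leaving credentials zero-valued so the request proceeds as anonymous.
func credsForUnsignedTrailer(lookup func() apiErr) apiErr {
	s3Err := lookup()
	if s3Err == errMissingFields {
		// Could be anonymous. Credentials stay zero-valued.
		s3Err = errNone
	}
	return s3Err
}

func main() {
	ok := credsForUnsignedTrailer(func() apiErr { return errMissingFields })
	fmt.Println(ok == errNone) // true
}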
@@ -28,7 +28,7 @@ import (
 	"time"
 
 	"github.com/minio/minio/internal/auth"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 type nullReader struct{}
@@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {
 
 	// Validates all testcases.
 	for i, testCase := range testCases {
-		s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
+		s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
 		if s3Error != testCase.s3Error {
 			if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
 				t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) {
 }
 
 func TestCheckAdminRequestAuthType(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 
 	objLayer, fsDir, err := prepareFS(ctx)
@@ -443,14 +443,14 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
 		{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
 	}
 	for i, testCase := range testCases {
-		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
+		if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
 			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
 		}
 	}
 }
 
 func TestValidateAdminSignature(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
 	defer cancel()
 
 	objLayer, fsDir, err := prepareFS(ctx)
 
@@ -25,8 +25,7 @@ import (
 	"time"

 	"github.com/minio/madmin-go/v3"
-	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/env"
+	"github.com/minio/pkg/v3/env"
 )

 // healTask represents what to heal along with options
@@ -101,16 +100,17 @@ func waitForLowHTTPReq() {
 }

 func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
+	bgSeq := newBgHealSequence()
 	// Run the background healer
 	for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
-		go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
+		go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
 	}

-	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
+	globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI)
 }

 // Wait for heal requests and process them
-func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
+func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) {
 	for {
 		select {
 		case task, ok := <-h.tasks:
@@ -135,8 +135,18 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {

 			if task.respCh != nil {
 				task.respCh <- healResult{result: res, err: err}
+				continue
 			}

+			// when respCh is not set caller is not waiting but we
+			// update the relevant metrics for them
+			if bgSeq != nil {
+				if err == nil {
+					bgSeq.countHealed(res.Type)
+				} else {
+					bgSeq.countFailed(res.Type)
+				}
+			}
 		case <-ctx.Done():
 			return
 		}
@@ -148,7 +158,7 @@ func newHealRoutine() *healRoutine {

 	if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
 		if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
-			logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
+			bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
 		} else {
 			workers = numHealers
 		}
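The initBackgroundHealing change above threads one shared *healSequence through every worker, so fire-and-forget tasks (those with no respCh) still feed the sequence's healed/failed counters. A minimal sketch of that worker-pool shape, with hypothetical stand-in types:

package sketch

import (
    "context"
    "sync/atomic"
)

// healTaskSketch and healMetrics are hypothetical stand-ins for the real
// task/sequence types; the point is the shared metrics sink.
type healTaskSketch struct {
    respCh chan error // nil means the submitter is not waiting for the result
}

type healMetrics struct{ healed, failed atomic.Int64 }

func processSketch(healTaskSketch) error { return nil } // stand-in for actual healing

func startHealWorkers(ctx context.Context, tasks <-chan healTaskSketch, n int, m *healMetrics) {
    for i := 0; i < n; i++ {
        go func() {
            for {
                select {
                case t, ok := <-tasks:
                    if !ok {
                        return
                    }
                    err := processSketch(t)
                    if t.respCh != nil {
                        t.respCh <- err // a caller is blocked on this result
                        continue
                    }
                    // Fire-and-forget task: account for it centrally,
                    // mirroring bgSeq.countHealed/countFailed above.
                    if err == nil {
                        m.healed.Add(1)
                    } else {
                        m.failed.Add(1)
                    }
                case <-ctx.Done():
                    return
                }
            }
        }()
    }
}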
@@ -33,8 +33,7 @@ import (
 	"github.com/minio/madmin-go/v3"
 	"github.com/minio/minio-go/v7/pkg/set"
 	"github.com/minio/minio/internal/config"
-	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/env"
+	"github.com/minio/pkg/v3/env"
 )

 const (
@@ -73,10 +72,12 @@ type healingTracker struct {

 	// Numbers when current bucket started healing,
 	// for resuming with correct numbers.
-	ResumeItemsHealed uint64 `json:"-"`
-	ResumeItemsFailed uint64 `json:"-"`
-	ResumeBytesDone   uint64 `json:"-"`
-	ResumeBytesFailed uint64 `json:"-"`
+	ResumeItemsHealed  uint64 `json:"-"`
+	ResumeItemsFailed  uint64 `json:"-"`
+	ResumeItemsSkipped uint64 `json:"-"`
+	ResumeBytesDone    uint64 `json:"-"`
+	ResumeBytesFailed  uint64 `json:"-"`
+	ResumeBytesSkipped uint64 `json:"-"`

 	// Filled on startup/restarts.
 	QueuedBuckets []string
@@ -89,6 +90,11 @@ type healingTracker struct {

 	ItemsSkipped uint64
 	BytesSkipped uint64

+	RetryAttempts uint64
+
+	Finished bool // finished healing, whether with errors or not
+
 	// Add future tracking capabilities
 	// Be sure that they are included in toHealingDisk
 }
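For context on the Resume* fields: the tracker keeps two sets of counters, the live ones and a checkpoint taken each time a bucket completes; on restart the live counters roll back to the checkpoint so a partially healed bucket is recounted from a consistent base. The Skipped variants added here simply join that scheme. A schematic of the checkpoint/restore pair (mirroring bucketDone and resume further below):

package sketch

// Schematic only: the checkpoint/restore pattern the Resume* fields implement.
type counters struct{ healed, failed, skipped uint64 }

type progress struct {
    live       counters
    checkpoint counters // last consistent state, persisted to disk
}

// bucketDone: a unit of work finished cleanly, advance the checkpoint.
func (p *progress) bucketDone() { p.checkpoint = p.live }

// resume: after a restart, drop partial progress past the checkpoint.
func (p *progress) resume() { p.live = p.checkpoint }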
@@ -142,14 +148,34 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker {
 	return h
 }

-func (h healingTracker) getLastUpdate() time.Time {
+func (h *healingTracker) resetHealing() {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.ItemsHealed = 0
+	h.ItemsFailed = 0
+	h.BytesDone = 0
+	h.BytesFailed = 0
+	h.ResumeItemsHealed = 0
+	h.ResumeItemsFailed = 0
+	h.ResumeBytesDone = 0
+	h.ResumeBytesFailed = 0
+	h.ItemsSkipped = 0
+	h.BytesSkipped = 0
+
+	h.HealedBuckets = nil
+	h.Object = ""
+	h.Bucket = ""
+}
+
+func (h *healingTracker) getLastUpdate() time.Time {
 	h.mu.RLock()
 	defer h.mu.RUnlock()

 	return h.LastUpdate
 }

-func (h healingTracker) getBucket() string {
+func (h *healingTracker) getBucket() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()

@@ -163,7 +189,7 @@ func (h *healingTracker) setBucket(bucket string) {
 	h.Bucket = bucket
 }

-func (h healingTracker) getObject() string {
+func (h *healingTracker) getObject() string {
 	h.mu.RLock()
 	defer h.mu.RUnlock()

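The getter receivers above move from value to pointer for a reason worth spelling out: a value receiver copies the struct, including its sync.RWMutex, so the lock taken inside the method guards the copy rather than the shared tracker (go vet reports this as copylocks). A minimal illustration:

package sketch

import "sync"

type tracker struct {
    mu     sync.RWMutex
    bucket string
}

// Wrong: a value receiver `func (t tracker) getBucket()` would copy the
// tracker (and its mutex); RLock would lock the copy, protecting nothing.

// Right: the pointer receiver locks the shared instance.
func (t *tracker) getBucket() string {
    t.mu.RLock()
    defer t.mu.RUnlock()
    return t.bucket
}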
@@ -197,9 +223,6 @@ func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) {
 // update will update the tracker on the disk.
 // If the tracker has been deleted an error is returned.
 func (h *healingTracker) update(ctx context.Context) error {
-	if h.disk.Healing() == nil {
-		return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
-	}
 	h.mu.Lock()
 	if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
 		h.ID, _ = h.disk.GetDiskID()
@@ -261,8 +284,10 @@ func (h *healingTracker) resume() {

 	h.ItemsHealed = h.ResumeItemsHealed
 	h.ItemsFailed = h.ResumeItemsFailed
+	h.ItemsSkipped = h.ResumeItemsSkipped
 	h.BytesDone = h.ResumeBytesDone
 	h.BytesFailed = h.ResumeBytesFailed
+	h.BytesSkipped = h.ResumeBytesSkipped
 }

 // bucketDone should be called when a bucket is done healing.
@@ -273,8 +298,10 @@ func (h *healingTracker) bucketDone(bucket string) {

 	h.ResumeItemsHealed = h.ItemsHealed
 	h.ResumeItemsFailed = h.ItemsFailed
+	h.ResumeItemsSkipped = h.ItemsSkipped
 	h.ResumeBytesDone = h.BytesDone
 	h.ResumeBytesFailed = h.BytesFailed
+	h.ResumeBytesSkipped = h.BytesSkipped
 	h.HealedBuckets = append(h.HealedBuckets, bucket)
 	for i, b := range h.QueuedBuckets {
 		if b == bucket {
@@ -323,6 +350,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
 		PoolIndex:  h.PoolIndex,
 		SetIndex:   h.SetIndex,
 		DiskIndex:  h.DiskIndex,
+		Finished:   h.Finished,
 		Path:       h.Path,
 		Started:    h.Started.UTC(),
 		LastUpdate: h.LastUpdate.UTC(),
@@ -338,6 +366,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
 		Object:        h.Object,
 		QueuedBuckets: h.QueuedBuckets,
 		HealedBuckets: h.HealedBuckets,
+		RetryAttempts: h.RetryAttempts,

 		ObjectsHealed: h.ItemsHealed, // Deprecated July 2021
 		ObjectsFailed: h.ItemsFailed, // Deprecated July 2021
@@ -352,24 +381,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
 	}

 	initBackgroundHealing(ctx, objAPI) // start quick background healing

-	if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn {
+	if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn {
 		globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...)
 		go monitorLocalDisksAndHeal(ctx, z)
 	}

 	go globalMRFState.startMRFPersistence()
 	go globalMRFState.healRoutine(z)
 }

 func getLocalDisksToHeal() (disksToHeal Endpoints) {
 	globalLocalDrivesMu.RLock()
-	localDrives := cloneDrives(globalLocalDrives)
+	localDrives := cloneDrives(globalLocalDrivesMap)
 	globalLocalDrivesMu.RUnlock()
 	for _, disk := range localDrives {
-		_, err := disk.GetDiskID()
+		_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
 		if errors.Is(err, errUnformattedDisk) {
 			disksToHeal = append(disksToHeal, disk.Endpoint())
 			continue
 		}
-		if disk.Healing() != nil {
+		if h := disk.Healing(); h != nil && !h.Finished {
 			disksToHeal = append(disksToHeal, disk.Endpoint())
 		}
 	}
@@ -383,6 +414,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {

 var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

+var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")
+
 func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
 	poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
 	disk := getStorageViaEndpoint(endpoint)
@@ -390,6 +423,17 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
 	}

+	_, err := disk.DiskInfo(ctx, DiskInfoOptions{})
+	if err != nil {
+		if errors.Is(err, errDriveIsRoot) {
+			// This is a root drive, ignore and move on
+			return nil
+		}
+		if !errors.Is(err, errUnformattedDisk) {
+			return err
+		}
+	}
+
 	// Prevent parallel erasure set healing
 	locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
 	lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
@@ -409,11 +453,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		if errors.Is(err, errFileNotFound) {
 			return nil
 		}
-		logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
+		healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
 		tracker = initHealingTracker(disk, mustGetUUID())
 	}

-	logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
+	healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)

 	buckets, _ := z.ListBuckets(ctx, BucketOptions{})
 	// Buckets data are dispersed in multiple pools/sets, make
@@ -452,19 +496,37 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		return err
 	}

-	logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
-
-	if len(tracker.QueuedBuckets) > 0 {
-		return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
+	// if objects have failed healing, we attempt a retry to heal the drive upto 3 times before giving up.
+	if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
+		tracker.RetryAttempts++
+
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
+			humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+
+		tracker.resetHealing()
+		bugLogIf(ctx, tracker.update(ctx))
+
+		return errRetryHealing
 	}

+	if tracker.ItemsFailed > 0 {
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
+			tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+	} else {
+		if tracker.RetryAttempts > 0 {
+			healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
+				tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
+		} else {
+			healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
+		}
+	}
 	if serverDebugLog {
 		tracker.printTo(os.Stdout)
 		fmt.Printf("\n")
 	}

 	if tracker.HealID == "" { // HealID was empty only before Feb 2023
-		logger.LogIf(ctx, tracker.delete(ctx))
+		bugLogIf(ctx, tracker.delete(ctx))
 		return nil
 	}

@@ -482,12 +544,13 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 		t, err := loadHealingTracker(ctx, disk)
 		if err != nil {
 			if !errors.Is(err, errFileNotFound) {
-				logger.LogIf(ctx, err)
+				healingLogIf(ctx, err)
 			}
 			continue
 		}
 		if t.HealID == tracker.HealID {
-			t.delete(ctx)
+			t.Finished = true
+			t.update(ctx)
 		}
 	}

@@ -517,7 +580,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
 			// Reformat disks immediately
 			_, err := z.HealFormat(context.Background(), false)
 			if err != nil && !errors.Is(err, errNoHealRequired) {
-				healingLogIf(ctx, err)
+				healingLogIf(ctx, err)
 				// Reset for next interval.
 				diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
 				continue
@@ -529,7 +592,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
 				if err := healFreshDisk(ctx, z, disk); err != nil {
 					globalBackgroundHealState.setDiskHealingStatus(disk, false)
 					timedout := OperationTimedOut{}
-					if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
+					if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
 						printEndpointError(disk, err, false)
 					}
 					return
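The retry logic above bounds drive-heal retries: on residual failures it bumps RetryAttempts, resets the tracker with resetHealing, and returns the errRetryHealing sentinel, which the monitor loop now treats as non-fatal (the added !errors.Is(err, errRetryHealing) guard). A compact sketch of the same bounded-retry idea; note the real code returns the sentinel and lets the disk monitor re-drive the heal, whereas this sketch inlines the loop for brevity:

package sketch

import "errors"

const maxRetries = 3

// healOnce stands in for one full healing pass; it reports how many
// items could not be healed.
func healOnce() (failed int) { return 0 }

// runWithRetries re-runs the pass until it is clean or the retry
// budget is exhausted, mirroring healFreshDisk above.
func runWithRetries() error {
    for attempt := 0; ; attempt++ {
        if failed := healOnce(); failed == 0 {
            return nil // finished cleanly
        }
        if attempt >= maxRetries {
            return errors.New("giving up: items still failing after retries")
        }
        // reset counters / tracker state here, then go around again
    }
}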
@@ -132,6 +132,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "ResumeItemsFailed")
 				return
 			}
+		case "ResumeItemsSkipped":
+			z.ResumeItemsSkipped, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeItemsSkipped")
+				return
+			}
 		case "ResumeBytesDone":
 			z.ResumeBytesDone, err = dc.ReadUint64()
 			if err != nil {
@@ -144,6 +150,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "ResumeBytesFailed")
 				return
 			}
+		case "ResumeBytesSkipped":
+			z.ResumeBytesSkipped, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeBytesSkipped")
+				return
+			}
 		case "QueuedBuckets":
 			var zb0002 uint32
 			zb0002, err = dc.ReadArrayHeader()
@@ -200,6 +212,18 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "BytesSkipped")
 				return
 			}
+		case "RetryAttempts":
+			z.RetryAttempts, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
+		case "Finished":
+			z.Finished, err = dc.ReadBool()
+			if err != nil {
+				err = msgp.WrapError(err, "Finished")
+				return
+			}
 		default:
 			err = dc.Skip()
 			if err != nil {
@@ -213,9 +237,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 25
+	// map header, size 29
 	// write "ID"
-	err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
 	if err != nil {
 		return
 	}
@@ -394,6 +418,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "ResumeItemsFailed")
 		return
 	}
+	// write "ResumeItemsSkipped"
+	err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.ResumeItemsSkipped)
+	if err != nil {
+		err = msgp.WrapError(err, "ResumeItemsSkipped")
+		return
+	}
 	// write "ResumeBytesDone"
 	err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
 	if err != nil {
@@ -414,6 +448,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "ResumeBytesFailed")
 		return
 	}
+	// write "ResumeBytesSkipped"
+	err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.ResumeBytesSkipped)
+	if err != nil {
+		err = msgp.WrapError(err, "ResumeBytesSkipped")
+		return
+	}
 	// write "QueuedBuckets"
 	err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
 	if err != nil {
@@ -478,15 +522,35 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "BytesSkipped")
 		return
 	}
+	// write "RetryAttempts"
+	err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.RetryAttempts)
+	if err != nil {
+		err = msgp.WrapError(err, "RetryAttempts")
+		return
+	}
+	// write "Finished"
+	err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
+	if err != nil {
+		return
+	}
+	err = en.WriteBool(z.Finished)
+	if err != nil {
+		err = msgp.WrapError(err, "Finished")
+		return
+	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 25
+	// map header, size 29
 	// string "ID"
-	o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44)
 	o = msgp.AppendString(o, z.ID)
 	// string "PoolIndex"
 	o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@@ -539,12 +603,18 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "ResumeItemsFailed"
 	o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
 	o = msgp.AppendUint64(o, z.ResumeItemsFailed)
+	// string "ResumeItemsSkipped"
+	o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	o = msgp.AppendUint64(o, z.ResumeItemsSkipped)
 	// string "ResumeBytesDone"
 	o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
 	o = msgp.AppendUint64(o, z.ResumeBytesDone)
 	// string "ResumeBytesFailed"
 	o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
 	o = msgp.AppendUint64(o, z.ResumeBytesFailed)
+	// string "ResumeBytesSkipped"
+	o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
+	o = msgp.AppendUint64(o, z.ResumeBytesSkipped)
 	// string "QueuedBuckets"
 	o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
 	o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
@@ -566,6 +636,12 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "BytesSkipped"
 	o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
 	o = msgp.AppendUint64(o, z.BytesSkipped)
+	// string "RetryAttempts"
+	o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	o = msgp.AppendUint64(o, z.RetryAttempts)
+	// string "Finished"
+	o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64)
+	o = msgp.AppendBool(o, z.Finished)
 	return
 }

@@ -695,6 +771,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			err = msgp.WrapError(err, "ResumeItemsFailed")
 			return
 		}
+		case "ResumeItemsSkipped":
+			z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeItemsSkipped")
+				return
+			}
 		case "ResumeBytesDone":
 			z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
 			if err != nil {
@@ -707,6 +789,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			err = msgp.WrapError(err, "ResumeBytesFailed")
 			return
 		}
+		case "ResumeBytesSkipped":
+			z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "ResumeBytesSkipped")
+				return
+			}
 		case "QueuedBuckets":
 			var zb0002 uint32
 			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
@@ -763,6 +851,18 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			err = msgp.WrapError(err, "BytesSkipped")
 			return
 		}
+		case "RetryAttempts":
+			z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
+		case "Finished":
+			z.Finished, bts, err = msgp.ReadBoolBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Finished")
+				return
+			}
 		default:
 			bts, err = msgp.Skip(bts)
 			if err != nil {
@@ -777,7 +877,7 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *healingTracker) Msgsize() (s int) {
-	s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
+	s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
 	for za0001 := range z.QueuedBuckets {
 		s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
 	}
@@ -785,6 +885,6 @@ func (z *healingTracker) Msgsize() (s int) {
 	for za0002 := range z.HealedBuckets {
 		s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
 	}
-	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
+	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize
 	return
 }
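A note on the opaque byte edits in this generated code: msgpack encodes a map of up to 65535 entries as the map16 marker 0xde followed by a big-endian uint16 field count, so the header change from 0xde 0x00 0x19 to 0xde 0x00 0x1d is exactly the field count moving from 25 to 29 for the four new fields. For example:

package sketch

import "encoding/binary"

// mapHeader builds a msgpack map16 header: 0xde + big-endian uint16 count.
func mapHeader(fields uint16) []byte {
    b := []byte{0xde, 0, 0}
    binary.BigEndian.PutUint16(b[1:], fields)
    return b
}

// mapHeader(25) == []byte{0xde, 0x00, 0x19}  (old healingTracker)
// mapHeader(29) == []byte{0xde, 0x00, 0x1d}  (with the four new fields)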
@@ -33,10 +33,10 @@ import (
 	"github.com/minio/minio/internal/bucket/versioning"
 	xhttp "github.com/minio/minio/internal/http"
 	xioutil "github.com/minio/minio/internal/ioutil"
-	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/v2/env"
-	"github.com/minio/pkg/v2/wildcard"
-	"github.com/minio/pkg/v2/workers"
+	"github.com/minio/pkg/v3/env"
+	"github.com/minio/pkg/v3/wildcard"
+	"github.com/minio/pkg/v3/workers"
+	"github.com/minio/pkg/v3/xtime"
 	"gopkg.in/yaml.v3"
 )

@@ -117,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error {
 // BatchJobExpireFilter holds all the filters currently supported for batch replication
 type BatchJobExpireFilter struct {
 	line, col     int
-	OlderThan     time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
+	OlderThan     xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
 	CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
 	Tags          []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
 	Metadata      []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
@@ -156,14 +156,14 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
 		}
 	default:
 		// we should never come here, Validate should have caught this.
-		logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
+		batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
 		return false
 	}

 	if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
 		return false
 	}
-	if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
+	if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
 		return false
 	}

@@ -195,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
 			return false
 		}
 	}

 	}

 	if len(ef.Metadata) > 0 && !obj.DeleteMarker {
 		for _, kv := range ef.Metadata {
 			// Object (version) must match all x-amz-meta and
@@ -281,7 +281,7 @@ type BatchJobExpire struct {
 	line, col       int
 	APIVersion      string `yaml:"apiVersion" json:"apiVersion"`
 	Bucket          string `yaml:"bucket" json:"bucket"`
-	Prefix          string `yaml:"prefix" json:"prefix"`
+	Prefix          BatchJobPrefix `yaml:"prefix" json:"prefix"`
 	NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"`
 	Retry           BatchJobRetry `yaml:"retry" json:"retry"`
 	Rules           []BatchJobExpireFilter `yaml:"rules" json:"rules"`
@@ -289,6 +289,16 @@ type BatchJobExpire struct {

 var _ yaml.Unmarshaler = &BatchJobExpire{}

+// RedactSensitive will redact any sensitive information in b.
+func (r *BatchJobExpire) RedactSensitive() {
+	if r == nil {
+		return
+	}
+	if r.NotificationCfg.Token != "" {
+		r.NotificationCfg.Token = redactedText
+	}
+}
+
 // UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
 func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
 	type expireJob BatchJobExpire
@@ -321,7 +331,7 @@ func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
 		req.Header.Set("Authorization", r.NotificationCfg.Token)
 	}

-	clnt := http.Client{Transport: getRemoteInstanceTransport}
+	clnt := http.Client{Transport: getRemoteInstanceTransport()}
 	resp, err := clnt.Do(req)
 	if err != nil {
 		return err
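OlderThan switching from time.Duration to xtime.Duration matters because Go's time.ParseDuration stops at hours, while the updated tests below use values like 7d10h that need a day unit. If you were to hand-roll just the day-suffix handling instead of pulling in minio/pkg's xtime, a sketch (parseDays is a hypothetical helper) might look like:

package sketch

import (
    "fmt"
    "strconv"
    "strings"
    "time"
)

// parseDays peels a leading "<n>d" off a duration string and defers the
// rest to time.ParseDuration, roughly what an extended-duration type has
// to do for values like "7d10h".
func parseDays(s string) (time.Duration, error) {
    var days int64
    if i := strings.IndexByte(s, 'd'); i > 0 {
        n, err := strconv.ParseInt(s[:i], 10, 64)
        if err != nil {
            return 0, fmt.Errorf("bad day count %q: %w", s[:i], err)
        }
        days, s = n, s[i+1:]
    }
    rest := time.Duration(0)
    if s != "" {
        var err error
        rest, err = time.ParseDuration(s) // handles the "10h31s" tail
        if err != nil {
            return 0, err
        }
    }
    return time.Duration(days)*24*time.Hour + rest, nil
}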
@@ -341,8 +351,24 @@ func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versio
 		PrefixEnabledFn:  vc.PrefixEnabled,
 		VersionSuspended: vc.Suspended(),
 	}
-	_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts)
-	return errs
+
+	allErrs := make([]error, 0, len(objsToDel))
+
+	for {
+		count := len(objsToDel)
+		if count == 0 {
+			break
+		}
+		if count > maxDeleteList {
+			count = maxDeleteList
+		}
+		_, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts)
+		allErrs = append(allErrs, errs...)
+		// Next batch of deletion
+		objsToDel = objsToDel[count:]
+	}
+
+	return allErrs
 }

 const (
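The rewritten Expire above stops handing DeleteObjects an unbounded slice and instead walks it in maxDeleteList-sized windows, mirroring S3's 1000-key cap on bulk deletes. The slicing pattern generalizes; a sketch, with process standing in for the bulk call:

package sketch

// chunked applies process to items in windows of at most size entries,
// collecting per-item errors, as the Expire loop above does with
// DeleteObjects and maxDeleteList.
func chunked[T any](items []T, size int, process func([]T) []error) []error {
    all := make([]error, 0, len(items))
    for len(items) > 0 {
        n := len(items)
        if n > size {
            n = size
        }
        all = append(all, process(items[:n])...)
        items = items[n:] // advance to the next window
    }
    return all
}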
@@ -372,9 +398,12 @@ func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) {

 func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) {
 	vc, _ := globalBucketVersioningSys.Get(r.Bucket)
-	retryAttempts := r.Retry.Attempts
+	retryAttempts := job.Expire.Retry.Attempts
+	if retryAttempts <= 0 {
+		retryAttempts = batchExpireJobDefaultRetries
+	}
 	delay := job.Expire.Retry.Delay
-	if delay == 0 {
+	if delay <= 0 {
 		delay = batchExpireJobDefaultRetryDelay
 	}

@@ -395,12 +424,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
 		go func(toExpire []expireObjInfo) {
 			defer wk.Give()

-			toExpireAll := make([]ObjectInfo, 0, len(toExpire))
+			toExpireAll := make([]expireObjInfo, 0, len(toExpire))
 			toDel := make([]ObjectToDelete, 0, len(toExpire))
 			oiCache := newObjInfoCache()
 			for _, exp := range toExpire {
 				if exp.ExpireAll {
-					toExpireAll = append(toExpireAll, exp.ObjectInfo)
+					toExpireAll = append(toExpireAll, exp)
 					continue
 				}
 				// Cache ObjectInfo value via pointers for
@@ -416,14 +445,14 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
 				oiCache.Add(od, &exp.ObjectInfo)
 			}

-			var done bool
 			// DeleteObject(deletePrefix: true) to expire all versions of an object
 			for _, exp := range toExpireAll {
 				var success bool
 				for attempts := 1; attempts <= retryAttempts; attempts++ {
 					select {
 					case <-ctx.Done():
-						done = true
+						ri.trackMultipleObjectVersions(exp, success)
 						return
 					default:
 					}
 					stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
@@ -433,21 +462,14 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
 					})
 					if err != nil {
 						stopFn(exp, err)
-						logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
+						batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts))
 					} else {
 						stopFn(exp, err)
 						success = true
 						break
 					}
 				}
-				ri.trackMultipleObjectVersions(r.Bucket, exp, success)
-				if done {
-					break
-				}
-			}
-
-			if done {
-				return
+				ri.trackMultipleObjectVersions(exp, success)
 			}

 			// DeleteMultiple objects
@@ -465,25 +487,25 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
 				copy(toDelCopy, toDel)
 				var failed int
 				errs := r.Expire(ctx, api, vc, toDel)
-				// reslice toDel in preparation for next retry
-				// attempt
+				// reslice toDel in preparation for next retry attempt
 				toDel = toDel[:0]
 				for i, err := range errs {
 					if err != nil {
 						stopFn(toDelCopy[i], err)
-						logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
+						batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID,
+							err, attempts))
 						failed++
-						if attempts == retryAttempts { // all retry attempts failed, record failure
-							if oi, ok := oiCache.Get(toDelCopy[i]); ok {
-								ri.trackCurrentBucketObject(r.Bucket, *oi, false)
-							}
-						} else {
+						if oi, ok := oiCache.Get(toDelCopy[i]); ok {
+							ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
+						}
+						if attempts != retryAttempts {
 							// retry
 							toDel = append(toDel, toDelCopy[i])
 						}
 					} else {
 						stopFn(toDelCopy[i], nil)
 						if oi, ok := oiCache.Get(toDelCopy[i]); ok {
-							ri.trackCurrentBucketObject(r.Bucket, *oi, true)
+							ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
 						}
 					}
 				}
@@ -505,7 +527,8 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo

 type expireObjInfo struct {
 	ObjectInfo
-	ExpireAll bool
+	ExpireAll         bool
+	DeleteMarkerCount int64
 }

 // Start the batch expiration job, resumes if there was a pending job via "job.ID"
@@ -515,7 +538,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 		JobType:   string(job.Type()),
 		StartTime: job.Started,
 	}
-	if err := ri.load(ctx, api, job); err != nil {
+	if err := ri.loadOrInit(ctx, api, job); err != nil {
 		return err
 	}
@@ -535,40 +558,58 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 		return err
 	}

-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	ctx, cancelCause := context.WithCancelCause(ctx)
+	defer cancelCause(nil)

-	results := make(chan ObjectInfo, workerSize)
-	if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
-		Marker:       lastObject,
-		LatestOnly:   false, // we need to visit all versions of the object to implement purge: retainVersions
-		VersionsSort: WalkVersionsSortDesc,
-	}); err != nil {
-		// Do not need to retry if we can't list objects on source.
-		return err
-	}
+	results := make(chan itemOrErr[ObjectInfo], workerSize)
+	go func() {
+		prefixes := r.Prefix.F()
+		if len(prefixes) == 0 {
+			prefixes = []string{""}
+		}
+		for _, prefix := range prefixes {
+			prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize)
+			err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{
+				Marker:       lastObject,
+				LatestOnly:   false, // we need to visit all versions of the object to implement purge: retainVersions
+				VersionsSort: WalkVersionsSortDesc,
+			})
+			if err != nil {
+				cancelCause(err)
+				xioutil.SafeClose(results)
+				return
+			}
+			for result := range prefixResultCh {
+				results <- result
+			}
+		}
+		xioutil.SafeClose(results)
+	}()

 	// Goroutine to periodically save batch-expire job's in-memory state
 	saverQuitCh := make(chan struct{})
 	go func() {
 		saveTicker := time.NewTicker(10 * time.Second)
 		defer saveTicker.Stop()
-		for {
+		quit := false
+		after := time.Minute
+		for !quit {
 			select {
 			case <-saveTicker.C:
-				// persist in-memory state to disk after every 10secs.
-				logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
-
 			case <-ctx.Done():
 				// persist in-memory state immediately before exiting due to context cancellation.
-				logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
-				return
+				quit = true
 			case <-saverQuitCh:
 				// persist in-memory state immediately to disk.
-				logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
-				return
+				quit = true
 			}

+			if quit {
+				// save immediately if we are quitting
+				after = 0
+			}
+
+			ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
+			batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
+			cancel()
 		}
 	}()
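The rewritten listing stage walks each configured prefix in turn and funnels everything into one results channel, aborting via context.WithCancelCause so the consumer can read the walk error back with context.Cause. A reduced sketch of that fan-in, with walk as a stand-in for api.Walk:

package sketch

import "context"

type item struct{ name string }

// walk is a stand-in for api.Walk: it streams entries under one prefix
// into out, or returns an error before streaming begins.
func walk(ctx context.Context, prefix string, out chan<- item) error {
    defer close(out)
    return nil
}

// fanIn walks prefixes one at a time into a single channel; a failed
// walk cancels the whole job with a cause the consumer can inspect via
// context.Cause(ctx) after the channel closes.
func fanIn(ctx context.Context, cancel context.CancelCauseFunc, prefixes []string) <-chan item {
    results := make(chan item, 16)
    go func() {
        defer close(results)
        for _, p := range prefixes {
            ch := make(chan item, 16)
            if err := walk(ctx, p, ch); err != nil {
                cancel(err) // surfaces the failure to the consumer
                return
            }
            for it := range ch {
                results <- it
            }
        }
    }()
    return results
}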
@@ -584,69 +625,115 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 		matchedFilter BatchJobExpireFilter
 		versionsCount int
 		toDel         []expireObjInfo
+		failed        bool
+		done          bool
 	)
-	for result := range results {
-		// Apply filter to find the matching rule to apply expiry
-		// actions accordingly.
-		// nolint:gocritic
-		if result.IsLatest {
-			// send down filtered entries to be deleted using
-			// DeleteObjects method
-			if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
-				xfer := make([]expireObjInfo, len(toDel))
-				copy(xfer, toDel)
-
-				var done bool
-				select {
-				case <-ctx.Done():
-					done = true
-				case expireCh <- xfer:
-					toDel = toDel[:0] // resetting toDel
-				}
-				if done {
-					break
-				}
-			}
-			var match BatchJobExpireFilter
-			var found bool
-			for _, rule := range r.Rules {
-				if rule.Matches(result, now) {
-					match = rule
-					found = true
-					break
-				}
-			}
-			if !found {
-				continue
-			}
-
-			prevObj = result
-			matchedFilter = match
-			versionsCount = 1
-			// Include the latest version
-			if matchedFilter.Purge.RetainVersions == 0 {
-				toDel = append(toDel, expireObjInfo{
-					ObjectInfo: result,
-					ExpireAll:  true,
-				})
-				continue
-			}
-		} else if prevObj.Name == result.Name {
-			if matchedFilter.Purge.RetainVersions == 0 {
-				continue // including latest version in toDel suffices, skipping other versions
-			}
-			versionsCount++
-		} else {
-			continue
-		}
-
-		if versionsCount <= matchedFilter.Purge.RetainVersions {
-			continue // retain versions
-		}
-		toDel = append(toDel, expireObjInfo{
-			ObjectInfo: result,
-		})
-	}
+	deleteMarkerCountMap := map[string]int64{}
+	pushToExpire := func() {
+		// set preObject deleteMarkerCount
+		if len(toDel) > 0 {
+			lastDelIndex := len(toDel) - 1
+			lastDel := toDel[lastDelIndex]
+			if lastDel.ExpireAll {
+				toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name]
+				// delete the key
+				delete(deleteMarkerCountMap, lastDel.Name)
+			}
+		}
+		// send down filtered entries to be deleted using
+		// DeleteObjects method
+		if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
+			xfer := make([]expireObjInfo, len(toDel))
+			copy(xfer, toDel)
+			select {
+			case expireCh <- xfer:
+				toDel = toDel[:0] // resetting toDel
+			case <-ctx.Done():
+				done = true
+			}
+		}
+	}
+	for {
+		select {
+		case result, ok := <-results:
+			if !ok {
+				done = true
+				break
+			}
+			if result.Err != nil {
+				failed = true
+				batchLogIf(ctx, result.Err)
+				continue
+			}
+			if result.Item.DeleteMarker {
+				deleteMarkerCountMap[result.Item.Name]++
+			}
+			// Apply filter to find the matching rule to apply expiry
+			// actions accordingly.
+			// nolint:gocritic
+			if result.Item.IsLatest {
+				var match BatchJobExpireFilter
+				var found bool
+				for _, rule := range r.Rules {
+					if rule.Matches(result.Item, now) {
+						match = rule
+						found = true
+						break
+					}
+				}
+				if !found {
+					continue
+				}
+
+				if prevObj.Name != result.Item.Name {
+					// switch the object
+					pushToExpire()
+				}
+
+				prevObj = result.Item
+				matchedFilter = match
+				versionsCount = 1
+				// Include the latest version
+				if matchedFilter.Purge.RetainVersions == 0 {
+					toDel = append(toDel, expireObjInfo{
+						ObjectInfo: result.Item,
+						ExpireAll:  true,
+					})
+					continue
+				}
+			} else if prevObj.Name == result.Item.Name {
+				if matchedFilter.Purge.RetainVersions == 0 {
+					continue // including latest version in toDel suffices, skipping other versions
+				}
+				versionsCount++
+			} else {
+				// switch the object
+				pushToExpire()
+				// a file switched with no LatestVersion, logging it
+				batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name))
+				continue
+			}
+
+			if versionsCount <= matchedFilter.Purge.RetainVersions {
+				continue // retain versions
+			}
+			toDel = append(toDel, expireObjInfo{
+				ObjectInfo: result.Item,
+			})
+			pushToExpire()
+		case <-ctx.Done():
+			done = true
+		}
+		if done {
+			break
+		}
+	}
+
+	if context.Cause(ctx) != nil {
+		xioutil.SafeClose(expireCh)
+		return context.Cause(ctx)
+	}
+	pushToExpire()
 	// Send any remaining objects downstream
 	if len(toDel) > 0 {
 		select {
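Note also that the listing channel's element type changed from ObjectInfo to itemOrErr[ObjectInfo], so walk errors travel in-band with the data instead of silently terminating the stream; the consumer above checks result.Err, flags the job failed, and keeps draining. Assuming the real generic has this shape, a sketch of the pattern:

package sketch

// itemOrErrSketch mirrors the result-wrapper used above: each channel
// element carries either a value or an error, letting producers report
// failures in-band without a second error channel.
type itemOrErrSketch[T any] struct {
    Item T
    Err  error
}

// drain consumes the stream, recording failures without aborting, as the
// expire loop above does.
func drain[T any](in <-chan itemOrErrSketch[T], use func(T)) (failed bool) {
    for r := range in {
        if r.Err != nil {
            failed = true
            continue
        }
        use(r.Item)
    }
    return failed
}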
@@ -659,8 +746,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 	<-expireDoneCh // waits for the expire goroutine to complete
 	wk.Wait()      // waits for all expire workers to retire

-	ri.Complete = ri.ObjectsFailed == 0
-	ri.Failed = ri.ObjectsFailed > 0
+	ri.Complete = !failed && ri.ObjectsFailed == 0
+	ri.Failed = failed || ri.ObjectsFailed > 0
 	globalBatchJobsMetrics.save(job.ID, ri)

 	// Close the saverQuitCh - this also triggers saving in-memory state
@@ -670,7 +757,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
 	// Notify expire jobs final status to the configured endpoint
 	buf, _ := json.Marshal(ri)
 	if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
-		logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
+		batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
 	}

 	return nil
@@ -39,7 +39,7 @@ func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) {
 			return
 		}
 		case "Prefix":
-			z.Prefix, err = dc.ReadString()
+			err = z.Prefix.DecodeMsg(dc)
 			if err != nil {
 				err = msgp.WrapError(err, "Prefix")
 				return
@@ -114,7 +114,7 @@ func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) {
 	if err != nil {
 		return
 	}
-	err = en.WriteString(z.Prefix)
+	err = z.Prefix.EncodeMsg(en)
 	if err != nil {
 		err = msgp.WrapError(err, "Prefix")
 		return
@@ -171,7 +171,11 @@ func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.AppendString(o, z.Bucket)
 	// string "Prefix"
 	o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
-	o = msgp.AppendString(o, z.Prefix)
+	o, err = z.Prefix.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "Prefix")
+		return
+	}
 	// string "NotificationCfg"
 	o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67)
 	o, err = z.NotificationCfg.MarshalMsg(o)
@@ -230,7 +234,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			return
 		}
 		case "Prefix":
-			z.Prefix, bts, err = msgp.ReadStringBytes(bts)
+			bts, err = z.Prefix.UnmarshalMsg(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "Prefix")
 				return
@@ -280,7 +284,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BatchJobExpire) Msgsize() (s int) {
-	s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
+	s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize
 	for za0001 := range z.Rules {
 		s += z.Rules[za0001].Msgsize()
 	}
@@ -306,7 +310,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
 		}
 		switch msgp.UnsafeString(field) {
 		case "OlderThan":
-			z.OlderThan, err = dc.ReadDuration()
+			err = z.OlderThan.DecodeMsg(dc)
 			if err != nil {
 				err = msgp.WrapError(err, "OlderThan")
 				return
@@ -433,7 +437,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
 	if err != nil {
 		return
 	}
-	err = en.WriteDuration(z.OlderThan)
+	err = z.OlderThan.EncodeMsg(en)
 	if err != nil {
 		err = msgp.WrapError(err, "OlderThan")
 		return
@@ -544,7 +548,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
 	// map header, size 8
 	// string "OlderThan"
 	o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
-	o = msgp.AppendDuration(o, z.OlderThan)
+	o, err = z.OlderThan.MarshalMsg(o)
+	if err != nil {
+		err = msgp.WrapError(err, "OlderThan")
+		return
+	}
 	// string "CreatedBefore"
 	o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
 	if z.CreatedBefore == nil {
@@ -613,7 +621,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
 		}
 		switch msgp.UnsafeString(field) {
 		case "OlderThan":
-			z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
+			bts, err = z.OlderThan.UnmarshalMsg(bts)
 			if err != nil {
 				err = msgp.WrapError(err, "OlderThan")
 				return
@@ -734,7 +742,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BatchJobExpireFilter) Msgsize() (s int) {
-	s = 1 + 10 + msgp.DurationSize + 14
+	s = 1 + 10 + z.OlderThan.Msgsize() + 14
 	if z.CreatedBefore == nil {
 		s += msgp.NilSize
 	} else {
@@ -18,9 +18,10 @@
 package cmd

 import (
+	"slices"
 	"testing"

-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
 )

 func TestParseBatchJobExpire(t *testing.T) {
@@ -32,7 +33,7 @@ expire: # Expire objects that match a condition
   rules:
     - type: object # regular objects with zero or more older versions
       name: NAME # match object names that satisfy the wildcard expression.
-      olderThan: 70h # match objects older than this value
+      olderThan: 7d10h # match objects older than this value
       createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
       tags:
        - key: name
@@ -64,8 +65,61 @@ expire: # Expire objects that match a condition
     delay: 500ms # least amount of delay between each retry
 `
 	var job BatchJobRequest
-	err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
+	err := yaml.Unmarshal([]byte(expireYaml), &job)
 	if err != nil {
 		t.Fatal("Failed to parse batch-job-expire yaml", err)
 	}
+	if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) {
+		t.Fatal("Failed to parse batch-job-expire yaml")
+	}
+
+	multiPrefixExpireYaml := `
+expire: # Expire objects that match a condition
+  apiVersion: v1
+  bucket: mybucket # Bucket where this batch job will expire matching objects from
+  prefix: # (Optional) Prefix under which this job will expire objects matching the rules below.
+    - myprefix
+    - myprefix1
+  rules:
+    - type: object # regular objects with zero or more older versions
+      name: NAME # match object names that satisfy the wildcard expression.
+      olderThan: 7d10h # match objects older than this value
+      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
+      tags:
+        - key: name
+          value: pick* # match objects with tag 'name', all values starting with 'pick'
+      metadata:
+        - key: content-type
+          value: image/* # match objects with 'content-type', all values starting with 'image/'
+      size:
+        lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB)
+        greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB)
+      purge:
+        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
+        # retainVersions: 5 # keep the latest 5 versions of the object.
+
+    - type: deleted # objects with delete marker as their latest version
+      name: NAME # match object names that satisfy the wildcard expression.
+      olderThan: 10h # match objects older than this value (e.g. 7d10h31s)
+      createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
+      purge:
+        # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest.
+        # retainVersions: 5 # keep the latest 5 versions of the object including delete markers.
+
+  notify:
+    endpoint: https://notify.endpoint # notification endpoint to receive job completion status
+    token: Bearer xxxxx # optional authentication token for the notification endpoint
+
+  retry:
+    attempts: 10 # number of retries for the job before giving up
+    delay: 500ms # least amount of delay between each retry
+`
+	var multiPrefixJob BatchJobRequest
+	err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob)
+	if err != nil {
+		t.Fatal("Failed to parse batch-job-expire yaml", err)
+	}
+	if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) {
+		t.Fatal("Failed to parse batch-job-expire yaml")
+	}
 }
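For the prefix field to accept both the original scalar form (prefix: myprefix) and the new list form, BatchJobPrefix presumably implements yaml.Unmarshaler over both node kinds. A sketch of how such a one-or-many type is typically written with gopkg.in/yaml.v3 (the F() accessor name is taken from the tests above; the type name here is hypothetical):

package sketch

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

// prefixList accepts either a single YAML scalar or a sequence of
// scalars, like the `prefix:` field exercised in the tests above.
type prefixList []string

func (p *prefixList) UnmarshalYAML(value *yaml.Node) error {
    switch value.Kind {
    case yaml.ScalarNode:
        var s string
        if err := value.Decode(&s); err != nil {
            return err
        }
        *p = prefixList{s}
        return nil
    case yaml.SequenceNode:
        var ss []string
        if err := value.Decode(&ss); err != nil {
            return err
        }
        *p = prefixList(ss)
        return nil
    default:
        return fmt.Errorf("prefix: unsupported YAML node kind %v", value.Kind)
    }
}

// F returns the prefixes as a plain slice.
func (p prefixList) F() []string { return []string(p) }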
(File diff suppressed because it is too large.)
@@ -6,6 +6,89 @@ import (
 	"github.com/tinylib/msgp/msgp"
 )

+// DecodeMsg implements msgp.Decodable
+func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) {
+	var zb0002 uint32
+	zb0002, err = dc.ReadArrayHeader()
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	if cap((*z)) >= int(zb0002) {
+		(*z) = (*z)[:zb0002]
+	} else {
+		(*z) = make(BatchJobPrefix, zb0002)
+	}
+	for zb0001 := range *z {
+		(*z)[zb0001], err = dc.ReadString()
+		if err != nil {
+			err = msgp.WrapError(err, zb0001)
+			return
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) {
+	err = en.WriteArrayHeader(uint32(len(z)))
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	for zb0003 := range z {
+		err = en.WriteString(z[zb0003])
+		if err != nil {
+			err = msgp.WrapError(err, zb0003)
+			return
+		}
+	}
+	return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) {
+	o = msgp.Require(b, z.Msgsize())
+	o = msgp.AppendArrayHeader(o, uint32(len(z)))
+	for zb0003 := range z {
+		o = msgp.AppendString(o, z[zb0003])
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var zb0002 uint32
+	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		err = msgp.WrapError(err)
+		return
+	}
+	if cap((*z)) >= int(zb0002) {
+		(*z) = (*z)[:zb0002]
+	} else {
+		(*z) = make(BatchJobPrefix, zb0002)
+	}
+	for zb0001 := range *z {
+		(*z)[zb0001], bts, err = msgp.ReadStringBytes(bts)
+		if err != nil {
+			err = msgp.WrapError(err, zb0001)
+			return
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z BatchJobPrefix) Msgsize() (s int) {
+	s = msgp.ArrayHeaderSize
+	for zb0003 := range z {
+		s += msgp.StringPrefixSize + len(z[zb0003])
+	}
+	return
+}
+
 // DecodeMsg implements msgp.Decodable
 func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) {
 	var field []byte
@@ -419,6 +502,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
 			err = msgp.WrapError(err, "RetryAttempts")
 			return
 		}
+		case "at":
+			z.Attempts, err = dc.ReadInt()
+			if err != nil {
+				err = msgp.WrapError(err, "Attempts")
+				return
+			}
 		case "cmp":
 			z.Complete, err = dc.ReadBool()
 			if err != nil {
@@ -492,9 +581,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 16
+	// map header, size 17
 	// write "v"
-	err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
+	err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
 	if err != nil {
 		return
 	}
@@ -553,6 +642,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "RetryAttempts")
 		return
 	}
+	// write "at"
+	err = en.Append(0xa2, 0x61, 0x74)
+	if err != nil {
+		return
+	}
+	err = en.WriteInt(z.Attempts)
+	if err != nil {
+		err = msgp.WrapError(err, "Attempts")
+		return
+	}
 	// write "cmp"
 	err = en.Append(0xa3, 0x63, 0x6d, 0x70)
 	if err != nil {
@@ -659,9 +758,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
 // MarshalMsg implements msgp.Marshaler
 func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 16
+	// map header, size 17
 	// string "v"
-	o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
+	o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
 	o = msgp.AppendInt(o, z.Version)
 	// string "jid"
 	o = append(o, 0xa3, 0x6a, 0x69, 0x64)
@@ -678,6 +777,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "ra"
 	o = append(o, 0xa2, 0x72, 0x61)
 	o = msgp.AppendInt(o, z.RetryAttempts)
+	// string "at"
+	o = append(o, 0xa2, 0x61, 0x74)
+	o = msgp.AppendInt(o, z.Attempts)
 	// string "cmp"
 	o = append(o, 0xa3, 0x63, 0x6d, 0x70)
 	o = msgp.AppendBool(o, z.Complete)
@@ -765,6 +867,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
 			err = msgp.WrapError(err, "RetryAttempts")
 			return
 		}
+		case "at":
+			z.Attempts, bts, err = msgp.ReadIntBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "Attempts")
+				return
+			}
 		case "cmp":
 			z.Complete, bts, err = msgp.ReadBoolBytes(bts)
 			if err != nil {
@@ -839,6 +947,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {

 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *batchJobInfo) Msgsize() (s int) {
-	s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
+	s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
 	return
 }
@@ -9,6 +9,119 @@ import (
 	"github.com/tinylib/msgp/msgp"
 )

+func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) {
+	v := BatchJobPrefix{}
+	bts, err := v.MarshalMsg(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	left, err := v.UnmarshalMsg(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+	}
+
+	left, err = msgp.Skip(bts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(left) > 0 {
+		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+	}
+}
+
+func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) {
+	v := BatchJobPrefix{}
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) {
+	v := BatchJobPrefix{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) {
+	v := BatchJobPrefix{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestEncodeDecodeBatchJobPrefix(t *testing.T) {
+	v := BatchJobPrefix{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+		t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate")
+	}
+
+	vn := BatchJobPrefix{}
+	err := msgp.Decode(&buf, &vn)
+	if err != nil {
+		t.Error(err)
+	}
+
+	buf.Reset()
+	msgp.Encode(&buf, &v)
+	err = msgp.NewReader(&buf).Skip()
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func BenchmarkEncodeBatchJobPrefix(b *testing.B) {
+	v := BatchJobPrefix{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	en := msgp.NewWriter(msgp.Nowhere)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecodeBatchJobPrefix(b *testing.B) {
+	v := BatchJobPrefix{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
 func TestMarshalUnmarshalBatchJobRequest(t *testing.T) {
 	v := BatchJobRequest{}
 	bts, err := v.MarshalMsg(nil)
75
cmd/batch-handlers_test.go
Normal file
75
cmd/batch-handlers_test.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) {
|
||||
type args struct {
|
||||
yamlStr string
|
||||
}
|
||||
type PrefixTemp struct {
|
||||
Prefix BatchJobPrefix `yaml:"prefix"`
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
b PrefixTemp
|
||||
args args
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "test1",
|
||||
b: PrefixTemp{},
|
||||
args: args{
|
||||
yamlStr: `
|
||||
prefix: "foo"
|
||||
`,
|
||||
},
|
||||
want: []string{"foo"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "test2",
|
||||
b: PrefixTemp{},
|
||||
args: args{
|
||||
yamlStr: `
|
||||
prefix:
|
||||
- "foo"
|
||||
- "bar"
|
||||
`,
|
||||
},
|
||||
want: []string{"foo", "bar"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr {
|
||||
t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
if !slices.Equal(tt.b.Prefix.F(), tt.want) {
|
||||
t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
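The test above depends on BatchJobPrefix accepting either a bare string or a list for the prefix key. The real implementation ships elsewhere in this changeset; a minimal sketch of that scalar-or-sequence pattern with gopkg.in/yaml.v3 could look like the following (the type and the F method mirror the test, the body is illustrative rather than the shipped code):

package cmd

import "gopkg.in/yaml.v3"

// BatchJobPrefix is a list of object prefixes that also accepts a bare
// scalar in YAML, so both `prefix: "foo"` and a two-item sequence parse.
type BatchJobPrefix []string

// UnmarshalYAML inspects the node kind to decide between the two forms.
func (b *BatchJobPrefix) UnmarshalYAML(value *yaml.Node) error {
	switch value.Kind {
	case yaml.ScalarNode: // prefix: "foo"
		var s string
		if err := value.Decode(&s); err != nil {
			return err
		}
		*b = []string{s}
	case yaml.SequenceNode: // prefix: [foo, bar]
		var ss []string
		if err := value.Decode(&ss); err != nil {
			return err
		}
		*b = ss
	}
	return nil
}

// F returns the accumulated prefixes, as exercised by the test.
func (b *BatchJobPrefix) F() []string { return *b }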
@@ -23,7 +23,7 @@ import (
"time"

"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)

@@ -21,8 +21,8 @@ import (
"time"

miniogo "github.com/minio/minio-go/v7"

"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v3/xtime"
)

//go:generate msgp -file $GOFILE
@@ -65,12 +65,12 @@ import (

// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
NewerThan xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
}

// BatchJobReplicateFlags various configurations for replication job definition currently includes
@@ -151,7 +151,7 @@ func (t BatchJobReplicateTarget) ValidPath() bool {
type BatchJobReplicateSource struct {
Type BatchJobReplicateResourceType `yaml:"type" json:"type"`
Bucket string `yaml:"bucket" json:"bucket"`
Prefix string `yaml:"prefix" json:"prefix"`
Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"`
Endpoint string `yaml:"endpoint" json:"endpoint"`
Path string `yaml:"path" json:"path"`
Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"`

@@ -411,7 +411,7 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) {
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
err = z.Prefix.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -514,7 +514,7 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteString(z.Prefix)
err = z.Prefix.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -600,7 +600,11 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, z.Bucket)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
o, err = z.Prefix.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
@@ -664,7 +668,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
bts, err = z.Prefix.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
@@ -742,7 +746,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error)

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobReplicateSource) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize()
return
}
@@ -1409,13 +1413,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, err = dc.ReadDuration()
err = z.NewerThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1489,7 +1493,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.NewerThan)
err = z.NewerThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
@@ -1499,7 +1503,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1567,10 +1571,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) {
// map header, size 6
// string "NewerThan"
o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.NewerThan)
o, err = z.NewerThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
// string "OlderThan"
o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedAfter"
o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72)
o = msgp.AppendTime(o, z.CreatedAfter)
@@ -1619,13 +1631,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.NewerThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@@ -1694,7 +1706,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchReplicateFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
for za0001 := range z.Tags {
s += z.Tags[za0001].Msgsize()
}
182 cmd/batch-replicate_test.go Normal file
@@ -0,0 +1,182 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"slices"
"testing"

"gopkg.in/yaml.v3"
)

func TestParseBatchJobReplicate(t *testing.T) {
replicateYaml := `
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: object-prefix1 # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
# endpoint: "http://127.0.0.1:9000"
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
# credentials:
# accessKey: minioadmin # Required
# secretKey: minioadmin # Required
# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: true # optionally turn-off snowball archive transfer
# batch: 100 # upto this many objects per archive
# inmemory: true # indicates if the archive must be staged locally or in-memory
# compress: false # S2/Snappy compressed archive
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
# skipErrs: false # skips any source side read() errors

# target where the objects must be replicated
target:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: stage # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http://127.0.0.1:9001"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: minioadmin
secretKey: minioadmin
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

# NOTE: All flags are optional
# - filtering criteria only applies for all source objects match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips successfully previously replaced objects)
flags:
filter:
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
# createdAfter: "date" # match objects created after "date"
# createdBefore: "date" # match objects created before "date"

## NOTE: tags are not supported when "source" is remote.
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

# notify:
# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
# token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
# attempts: 10 # number of retries for the job before giving up
# delay: "500ms" # least amount of delay between each retry

`
var job BatchJobRequest
err := yaml.Unmarshal([]byte(replicateYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
if !slices.Equal(job.Replicate.Source.Prefix.F(), []string{"object-prefix1"}) {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
multiPrefixReplicateYaml := `
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: # 'PREFIX' is optional
- object-prefix1
- object-prefix2
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'remote' *must* be the "local" deployment
# endpoint: "http://127.0.0.1:9000"
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
# credentials:
# accessKey: minioadmin # Required
# secretKey: minioadmin # Required
# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: true # optionally turn-off snowball archive transfer
# batch: 100 # upto this many objects per archive
# inmemory: true # indicates if the archive must be staged locally or in-memory
# compress: false # S2/Snappy compressed archive
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
# skipErrs: false # skips any source side read() errors

# target where the objects must be replicated
target:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: stage # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted

# Either the 'source' or 'remote' *must* be the "local" deployment
endpoint: "http://127.0.0.1:9001"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: minioadmin
secretKey: minioadmin
# sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used

# NOTE: All flags are optional
# - filtering criteria only applies for all source objects match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips successfully previously replaced objects)
flags:
filter:
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
# createdAfter: "date" # match objects created after "date"
# createdBefore: "date" # match objects created before "date"

## NOTE: tags are not supported when "source" is remote.
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'

metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'

# notify:
# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
# token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
# attempts: 10 # number of retries for the job before giving up
# delay: "500ms" # least amount of delay between each retry

`
var multiPrefixJob BatchJobRequest
err = yaml.Unmarshal([]byte(multiPrefixReplicateYaml), &multiPrefixJob)
if err != nil {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
if !slices.Equal(multiPrefixJob.Replicate.Source.Prefix.F(), []string{"object-prefix1", "object-prefix2"}) {
t.Fatal("Failed to parse batch-job-replicate yaml")
}
}
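The move from time.Duration to xtime.Duration in BatchReplicateFilter is what lets the YAML above say newerThan: "7d10h31s": standard time.ParseDuration has no day unit, so a wrapper type with its own parsing is needed. A rough sketch of the idea — the real parser lives in github.com/minio/pkg/v3/xtime and handles more (weeks, YAML/JSON hooks); this hand-rolled version only supports a leading day count:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseDuration accepts time.ParseDuration syntax plus a leading "<n>d"
// day component, e.g. "7d10h31s". Illustrative only.
func parseDuration(s string) (time.Duration, error) {
	var days time.Duration
	if i := strings.IndexByte(s, 'd'); i >= 0 {
		n, err := strconv.Atoi(s[:i])
		if err != nil {
			return 0, err
		}
		days = time.Duration(n) * 24 * time.Hour
		s = s[i+1:]
		if s == "" {
			return days, nil
		}
	}
	rest, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	return days + rest, nil
}

func main() {
	d, _ := parseDuration("7d10h31s")
	fmt.Println(d) // 178h0m31s
}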
@@ -33,9 +33,8 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/workers"
)

// keyrotate:
@@ -96,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}

if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
@@ -114,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
}
}
@@ -257,7 +257,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
if ri.Complete {
@@ -267,8 +267,12 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
globalBatchJobsMetrics.save(job.ID, ri)
lastObject := ri.Object

retryAttempts := job.KeyRotate.Flags.Retry.Attempts
if retryAttempts <= 0 {
retryAttempts = batchKeyRotateJobDefaultRetries
}
delay := job.KeyRotate.Flags.Retry.Delay
if delay == 0 {
if delay <= 0 {
delay = batchKeyRotateJobDefaultRetryDelay
}

@@ -354,10 +358,9 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
return err
}

retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)

results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
Filter: selectObj,
@@ -366,9 +369,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
// Do not need to retry if we can't list objects on source.
return err
}

for result := range results {
result := result
failed := false
for res := range results {
if res.Err != nil {
failed = true
batchLogIf(ctx, res.Err)
break
}
result := res.Item
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
@@ -378,21 +386,30 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(result, err)
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
success = false
if attempts >= retryAttempts {
auditOptions := AuditLogOptions{
Event: "KeyRotate",
APIName: "StartBatchJob",
Bucket: result.Bucket,
Object: result.Name,
VersionID: result.VersionID,
Error: err.Error(),
}
auditLogInternal(ctx, auditOptions)
}
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success {
break
}
@@ -408,14 +425,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
wk.Wait()

ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))

if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
}

cancel()
@@ -476,8 +493,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}

if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
return r.Flags.Retry.Validate()
}
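One pattern worth calling out from the hunk above (a reviewer's aside, not part of the diff): the Walk results channel now carries a value-or-error wrapper instead of bare ObjectInfo, so listing failures surface in-band and the job can be marked failed instead of silently completing. A generic sketch of that shape, assuming itemOrErr is roughly a two-field struct:

package main

import (
	"errors"
	"fmt"
)

// itemOrErr mirrors the shape used above: exactly one of Item or Err is set.
type itemOrErr[V any] struct {
	Item V
	Err  error
}

func main() {
	results := make(chan itemOrErr[string], 4)
	go func() {
		results <- itemOrErr[string]{Item: "object-1"}
		results <- itemOrErr[string]{Err: errors.New("listing interrupted")}
		close(results)
	}()

	failed := false
	for res := range results {
		if res.Err != nil {
			failed = true // remember the failure so the job is not marked complete
			break
		}
		fmt.Println("process", res.Item)
	}
	fmt.Println("failed:", failed)
}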
@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -54,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()

// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/ringbuffer"
|
||||
)
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow io.WriteCloser
|
||||
closeWithErr func(err error) error
|
||||
closeWithErr func(err error)
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose *sync.WaitGroup
|
||||
byteBuf []byte
|
||||
finished bool
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if b.finished {
|
||||
return 0, errors.New("bitrot write not allowed")
|
||||
}
|
||||
if int64(len(p)) > b.shardSize {
|
||||
return 0, errors.New("unexpected bitrot buffer size")
|
||||
}
|
||||
if int64(len(p)) < b.shardSize {
|
||||
b.finished = true
|
||||
}
|
||||
b.h.Reset()
|
||||
b.h.Write(p)
|
||||
hashBytes := b.h.Sum(nil)
|
||||
@@ -62,7 +75,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Close() error {
|
||||
// Close the underlying writer.
|
||||
// This will also flush the ring buffer if used.
|
||||
err := b.iow.Close()
|
||||
|
||||
// Wait for all data to be written before returning else it causes race conditions.
|
||||
// Race condition is because of io.PipeWriter implementation. i.e consider the following
|
||||
// sequent of operations:
|
||||
@@ -73,29 +89,34 @@ func (b *streamingBitrotWriter) Close() error {
|
||||
if b.canClose != nil {
|
||||
b.canClose.Wait()
|
||||
}
|
||||
|
||||
// Recycle the buffer.
|
||||
if b.byteBuf != nil {
|
||||
globalBytePoolCap.Load().Put(b.byteBuf)
|
||||
b.byteBuf = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
|
||||
// The output is written to the supplied writer w.
|
||||
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
|
||||
// Similar to CloseWithError on pipes we always return nil.
|
||||
return nil
|
||||
}}
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
|
||||
}
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
r, w := io.Pipe()
|
||||
h := algo.New()
|
||||
buf := globalBytePoolCap.Load().Get()
|
||||
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)
|
||||
|
||||
bw := &streamingBitrotWriter{
|
||||
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: w.CloseWithError,
|
||||
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: rb.CloseWithError,
|
||||
h: h,
|
||||
shardSize: shardSize,
|
||||
canClose: &sync.WaitGroup{},
|
||||
byteBuf: buf,
|
||||
}
|
||||
bw.canClose.Add(1)
|
||||
go func() {
|
||||
@@ -106,7 +127,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
|
||||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
|
||||
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
|
||||
}()
|
||||
return bw
|
||||
}
|
||||
@@ -131,13 +152,7 @@ func (b *streamingBitrotReader) Close() error {
|
||||
}
|
||||
if closer, ok := b.rc.(io.Closer); ok {
|
||||
// drain the body for connection reuse at network layer.
|
||||
xhttp.DrainBody(struct {
|
||||
io.Reader
|
||||
io.Closer
|
||||
}{
|
||||
Reader: b.rc,
|
||||
Closer: closeWrapper(func() error { return nil }),
|
||||
})
|
||||
xhttp.DrainBody(io.NopCloser(b.rc))
|
||||
return closer.Close()
|
||||
}
|
||||
return nil
|
||||
|
||||
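For orientation (the numbers below are illustrative, not from the diff): the streaming bitrot format stores one checksum per shardSize chunk ahead of the data, which is why CreateFile is told totalFileSize = bitrotSumsTotalSize + length in the hunk above. A worked example of that size computation:

package main

import "fmt"

// ceilFrac mirrors the helper used above: ceiling of a/b for positive ints.
func ceilFrac(a, b int64) int64 { return (a + b - 1) / b }

func main() {
	const (
		length    = 10 << 20 // 10 MiB shard file
		shardSize = 1 << 20  // 1 MiB bitrot chunks
		hashSize  = 32       // e.g. a 256-bit checksum per chunk
	)
	sums := ceilFrac(length, shardSize) * hashSize
	fmt.Println("checksum overhead:", sums, "bytes") // 10 chunks * 32 bytes = 320
	fmt.Println("total file size:", sums+length, "bytes")
}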
@@ -128,14 +128,20 @@ func closeBitrotReaders(rs []io.ReaderAt) {
}

// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
func closeBitrotWriters(ws []io.Writer) []error {
errs := make([]error, len(ws))
for i, w := range ws {
if w == nil {
errs[i] = errDiskNotFound
continue
}
if bw, ok := w.(io.Closer); ok {
errs[i] = bw.Close()
} else {
errs[i] = nil
}
}
return errs
}

// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
@@ -178,7 +184,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
return errFileCorrupt
}

bufp := xioutil.ODirectPoolSmall.Get().(*[]byte)
bufp := xioutil.ODirectPoolSmall.Get()
defer xioutil.ODirectPoolSmall.Put(bufp)

for left > 0 {
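The change to closeBitrotWriters above makes close failures visible per writer instead of being swallowed, so a caller can line the returned slice up with its disk slice. A small sketch of the same contract (errDiskNotFound and the helper name are taken from the diff; the rest is a standalone illustration):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errDiskNotFound = errors.New("disk not found")

// closeAll mirrors the new closeBitrotWriters contract: one error slot per
// writer, with nil writers reported as missing disks instead of skipped.
func closeAll(ws []io.Writer) []error {
	errs := make([]error, len(ws))
	for i, w := range ws {
		if w == nil {
			errs[i] = errDiskNotFound
			continue
		}
		if c, ok := w.(io.Closer); ok {
			errs[i] = c.Close()
		}
	}
	return errs
}

func main() {
	var sb strings.Builder
	ws := []io.Writer{&sb, nil} // second "disk" is offline
	for i, err := range closeAll(ws) {
		fmt.Printf("writer %d: err=%v\n", i, err)
	}
}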
@@ -18,7 +18,6 @@
package cmd

import (
"context"
"io"
"testing"
)
@@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
t.Fatal(err)
}

disk.MakeVol(context.Background(), volume)
disk.MakeVol(t.Context(), volume)

writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)

@@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit)

bs.mu.RLock()
for _, i := range bs.info {
traceInfo = append(traceInfo, i)
}
traceInfo = append(traceInfo, bs.info...)
bs.mu.RUnlock()

return traceInfo
@@ -19,9 +19,13 @@ package cmd

import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"os"
"reflect"
"strings"
"sync"
@@ -30,7 +34,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// To abstract a node over network.
@@ -43,10 +47,15 @@ type ServerSystemConfig struct {
NEndpoints int
CmdLines []string
MinioEnv map[string]string
Checksum string
}

// Diff - returns error on first difference found in two configs.
func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
if s1.Checksum != s2.Checksum {
return fmt.Errorf("Expected MinIO binary checksum: %s, seen: %s", s1.Checksum, s2.Checksum)
}

ns1 := s1.NEndpoints
ns2 := s2.NEndpoints
if ns1 != ns2 {
@@ -82,7 +91,7 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
extra = append(extra, k)
}
}
msg := "Expected same MINIO_ environment variables and values across all servers: "
msg := "Expected MINIO_* environment name and values across all servers to be same: "
if len(missing) > 0 {
msg += fmt.Sprintf(`Missing environment values: %v. `, missing)
}
@@ -97,14 +106,17 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
}

var skipEnvs = map[string]struct{}{
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
"MINIO_ROOT_USER": {},
"MINIO_ROOT_PASSWORD": {},
"MINIO_ACCESS_KEY": {},
"MINIO_SECRET_KEY": {},
"MINIO_OPTS": {},
"MINIO_CERT_PASSWD": {},
"MINIO_SERVER_DEBUG": {},
"MINIO_DSYNC_TRACE": {},
"MINIO_ROOT_USER": {},
"MINIO_ROOT_PASSWORD": {},
"MINIO_ACCESS_KEY": {},
"MINIO_SECRET_KEY": {},
"MINIO_OPERATOR_VERSION": {},
"MINIO_VSPHERE_PLUGIN_VERSION": {},
"MINIO_CI_CD": {},
}

func getServerSystemCfg() *ServerSystemConfig {
@@ -120,7 +132,7 @@ func getServerSystemCfg() *ServerSystemConfig {
}
envValues[envK] = logger.HashString(env.Get(envK, ""))
}
scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues}
scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues, Checksum: binaryChecksum}
var cmdLines []string
for _, ep := range globalEndpoints {
cmdLines = append(cmdLines, ep.CmdLine)
@@ -167,6 +179,26 @@ func (client *bootstrapRESTClient) String() string {
return client.gridConn.String()
}

var binaryChecksum = getBinaryChecksum()

func getBinaryChecksum() string {
mw := md5.New()
binPath, err := os.Executable()
if err != nil {
logger.Error("Calculating checksum failed: %s", err)
return "00000000000000000000000000000000"
}
b, err := os.Open(binPath)
if err != nil {
logger.Error("Calculating checksum failed: %s", err)
return "00000000000000000000000000000000"
}

defer b.Close()
io.Copy(mw, b)
return hex.EncodeToString(mw.Sum(nil))
}

func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools, gm *grid.Manager) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointServerPools, gm)
@@ -196,9 +228,9 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
err := clnt.Verify(ctx, srcCfg)
mu.Lock()
if err != nil {
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
bootstrapTraceMsg(fmt.Sprintf("bootstrapVerify: %v, endpoint: %s", err, clnt))
if !isNetworkError(err) {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))
} else {
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err))
@@ -79,6 +79,12 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
}
z.MinioEnv[za0002] = za0003
}
case "Checksum":
z.Checksum, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
default:
err = dc.Skip()
if err != nil {
@@ -92,9 +98,9 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {

// EncodeMsg implements msgp.Encodable
func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// map header, size 4
// write "NEndpoints"
err = en.Append(0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
err = en.Append(0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
if err != nil {
return
}
@@ -142,15 +148,25 @@ func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) {
return
}
}
// write "Checksum"
err = en.Append(0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
if err != nil {
return
}
err = en.WriteString(z.Checksum)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
return
}

// MarshalMsg implements msgp.Marshaler
func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// map header, size 4
// string "NEndpoints"
o = append(o, 0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
o = append(o, 0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73)
o = msgp.AppendInt(o, z.NEndpoints)
// string "CmdLines"
o = append(o, 0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73)
@@ -165,6 +181,9 @@ func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendString(o, za0002)
o = msgp.AppendString(o, za0003)
}
// string "Checksum"
o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d)
o = msgp.AppendString(o, z.Checksum)
return
}

@@ -241,6 +260,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
z.MinioEnv[za0002] = za0003
}
case "Checksum":
z.Checksum, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -266,5 +291,6 @@ func (z *ServerSystemConfig) Msgsize() (s int) {
s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
}
}
s += 9 + msgp.StringPrefixSize + len(z.Checksum)
return
}
@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
@@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
@@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,
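A pattern that recurs throughout these hunks: direct logger.LogIf calls are being replaced by subsystem-specific helpers (replLogIf, dnsLogIf, batchLogIf, internalLogIf, bootLogOnceIf). Presumably these are thin wrappers that attach a subsystem tag before delegating to the shared logger; a hypothetical sketch of that shape (the exact signatures in MinIO's cmd package may differ):

package cmd

import (
	"context"

	"github.com/minio/minio/internal/logger"
)

// Hypothetical: per-subsystem helpers that tag errors before logging, so
// log output and throttling can be grouped by subsystem.
func replLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "replication", err, errKind...)
}

func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) {
	logger.LogIf(ctx, "dns", err, errKind...)
}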
@@ -28,6 +28,7 @@ import (
"errors"
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
@@ -51,9 +52,9 @@ import (
sse "github.com/minio/minio/internal/bucket/encryption"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/cache"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
@@ -61,8 +62,8 @@ import (
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)

const (
@@ -72,6 +73,8 @@ const (

xMinIOErrCodeHeader = "x-minio-error-code"
xMinIOErrDescHeader = "x-minio-error-desc"

postPolicyBucketTagging = "tagging"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -90,7 +93,7 @@ const (
// -- If IP of the entry doesn't match, this means entry is
//
// for another instance. Log an error to console.
func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
if len(buckets) == 0 {
return
}
@@ -98,7 +101,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err)
dnsLogIf(GlobalContext, err)
return
}

@@ -111,10 +114,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
domainMissing := err == dns.ErrDomainMissing
if dnsBuckets != nil {
for _, bucket := range buckets {
bucketsSet.Add(bucket.Name)
r, ok := dnsBuckets[bucket.Name]
bucketsSet.Add(bucket)
r, ok := dnsBuckets[bucket]
if !ok {
bucketsToBeUpdated.Add(bucket.Name)
bucketsToBeUpdated.Add(bucket)
continue
}
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() {
@@ -133,7 +136,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// but if we do see a difference with local domain IPs with
// hostSlice from etcd then we should update with newer
// domainIPs, we proceed to do that here.
bucketsToBeUpdated.Add(bucket.Name)
bucketsToBeUpdated.Add(bucket)
continue
}

@@ -142,7 +145,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// bucket names are globally unique in federation at a given
// path prefix, name collision is not allowed. We simply log
// an error and continue.
bucketsInConflict.Add(bucket.Name)
bucketsInConflict.Add(bucket)
}
}

@@ -160,13 +163,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
dnsLogIf(ctx, err)
return
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}

var wg sync.WaitGroup
@@ -187,7 +190,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
@@ -227,7 +230,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalSite.Region
region := globalSite.Region()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -341,11 +344,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}

sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})

} else {
// Invoke the list buckets.
var err error
@@ -426,7 +427,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,

// Content-Md5 is required should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
if !validateLengthAndChecksum(r) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
return
}
@@ -558,7 +559,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
object.VersionPurgeStatus = replication.VersionPurgePending
object.VersionPurgeStatuses = dsc.PendingStatus()
} else {
object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
@@ -668,9 +669,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
continue
}

defer globalCacheConfig.Delete(bucket, dobj.ObjectName)

if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == replication.VersionPurgePending) {
// copy so we can re-add null ID.
dobj := dobj
if isDirObject(dobj.ObjectName) && dobj.VersionID == "" {
@@ -790,7 +789,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req

// check if client is attempting to create more buckets, complain about it.
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind)
}

opts := MakeBucketOptions{
@@ -840,7 +839,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

}
apiErr := ErrBucketAlreadyExists
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {
@@ -871,7 +869,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

// Call site replication hook
logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))

// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))
@@ -888,6 +886,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
})
}

// multipartReader is just like https://pkg.go.dev/net/http#Request.MultipartReader but
// rejects multipart/mixed as its not supported in S3 API.
func multipartReader(r *http.Request) (*multipart.Reader, error) {
v := r.Header.Get("Content-Type")
if v == "" {
return nil, http.ErrNotMultipart
}
if r.Body == nil {
return nil, errors.New("missing form body")
}
d, params, err := mime.ParseMediaType(v)
if err != nil {
return nil, http.ErrNotMultipart
}
if d != "multipart/form-data" {
return nil, http.ErrNotMultipart
}
boundary, ok := params["boundary"]
if !ok {
return nil, http.ErrMissingBoundary
}
return multipart.NewReader(r.Body, boundary), nil
}

// PostPolicyBucketHandler - POST policy
|
||||
// ----------
|
||||
// This implementation of the POST operation handles object creation with a specified
|
||||
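The new multipartReader helper narrows net/http's generic multipart handling to the multipart/form-data shape that S3 POST uploads require. A standalone sketch of the same guard, runnable outside MinIO (the handler name formDataReader and the demo request are illustrative, not part of the patch):

```go
package main

import (
	"errors"
	"fmt"
	"mime"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"strings"
)

// formDataReader mirrors the patch's multipartReader: it accepts only
// multipart/form-data, so multipart/mixed bodies are rejected up front.
func formDataReader(r *http.Request) (*multipart.Reader, error) {
	ct := r.Header.Get("Content-Type")
	if ct == "" {
		return nil, http.ErrNotMultipart
	}
	if r.Body == nil {
		return nil, errors.New("missing form body")
	}
	d, params, err := mime.ParseMediaType(ct)
	if err != nil || d != "multipart/form-data" {
		return nil, http.ErrNotMultipart
	}
	boundary, ok := params["boundary"]
	if !ok {
		return nil, http.ErrMissingBoundary
	}
	return multipart.NewReader(r.Body, boundary), nil
}

func main() {
	body := "--b\r\nContent-Disposition: form-data; name=\"key\"\r\n\r\nhello\r\n--b--\r\n"
	req := httptest.NewRequest(http.MethodPost, "/bucket", strings.NewReader(body))

	req.Header.Set("Content-Type", `multipart/form-data; boundary=b`)
	if _, err := formDataReader(req); err == nil {
		fmt.Println("accepted multipart/form-data")
	}

	req.Header.Set("Content-Type", `multipart/mixed; boundary=b`)
	_, err := formDataReader(req)
	fmt.Println("multipart/mixed ->", err) // rejected with http.ErrNotMultipart
}
```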
@@ -921,9 +943,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
+	if r.ContentLength <= 0 {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
+		return
+	}
+
 	// Here the parameter is the size of the form data that should
 	// be loaded in memory, the remaining being put in temporary files.
-	mp, err := r.MultipartReader()
+	mp, err := multipartReader(r)
 	if err != nil {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
 		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
@@ -935,7 +962,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 
 	var (
 		reader        io.Reader
-		fileSize      int64 = -1
+		actualSize    int64 = -1
 		fileName      string
 		fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100)
 	)
@@ -943,6 +970,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	maxParts := 1000
 	// Canonicalize the form values into http.Header.
 	formValues := make(http.Header)
+	var headerLen int64
 	for {
 		part, err := mp.NextRawPart()
 		if errors.Is(err, io.EOF) {
@@ -984,7 +1012,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			return
 		}
 
-		var b bytes.Buffer
+		headerLen += int64(len(name)) + int64(len(fileName))
 		if name != "file" {
 			if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") {
 				dec := json.NewDecoder(part)
@@ -995,7 +1023,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				if err := dec.Decode(&m); err != nil {
 					part.Close()
 					apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
-					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge)
+					apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
					writeErrorResponse(ctx, w, apiErr, r.URL)
 					return
 				}
@@ -1005,8 +1033,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				continue
 			}
 
+			buf := bytebufferpool.Get()
 			// value, store as string in memory
-			n, err := io.CopyN(&b, part, maxMemoryBytes+1)
+			n, err := io.CopyN(buf, part, maxMemoryBytes+1)
+			value := buf.String()
+			buf.Reset()
+			bytebufferpool.Put(buf)
 			part.Close()
 
 			if err != nil && err != io.EOF {
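Replacing the per-field bytes.Buffer with valyala/bytebufferpool lets the handler reuse one allocation across all form fields instead of growing a fresh buffer for each. A minimal sketch of the Get / use / Reset / Put cycle (assuming github.com/valyala/bytebufferpool, the package the hunk uses; copyValue is an invented helper name):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/valyala/bytebufferpool"
)

// copyValue drains up to limit bytes of r into a pooled buffer, snapshots
// the bytes as a string, and returns the buffer to the pool for reuse.
func copyValue(r io.Reader, limit int64) (string, int64, error) {
	buf := bytebufferpool.Get()
	defer func() {
		buf.Reset() // clear contents before handing the buffer back
		bytebufferpool.Put(buf)
	}()
	n, err := io.CopyN(buf, r, limit)
	if err != nil && err != io.EOF { // io.EOF just means the field was shorter than limit
		return "", n, err
	}
	return buf.String(), n, nil // String copies, so the pooled bytes stay reusable
}

func main() {
	v, n, err := copyValue(strings.NewReader("policy=..."), 1<<20)
	fmt.Println(v, n, err)
}
```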
@@ -1028,7 +1060,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				writeErrorResponse(ctx, w, apiErr, r.URL)
 				return
 			}
-			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String())
+			headerLen += n
+			formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], value)
 			continue
 		}
 
@@ -1037,6 +1070,21 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		// The file or text content must be the last field in the form.
 		// You cannot upload more than one file at a time.
 		reader = part
+
+		possibleShardSize := (r.ContentLength - headerLen)
+		if globalStorageClass.ShouldInline(possibleShardSize, false) { // keep versioned false for this check
+			var b bytes.Buffer
+			n, err := io.Copy(&b, reader)
+			if err != nil {
+				apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+				apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err)
+				writeErrorResponse(ctx, w, apiErr, r.URL)
+				return
+			}
+			reader = &b
+			actualSize = n
+		}
+
 		// we have found the File part of the request we are done processing multipart-form
 		break
 	}
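Buffering the file part in memory when the estimated shard size falls under the inline threshold lets small POST uploads be stored inline, and makes the exact object size known up front. The estimate is necessarily rough: request body length minus the bytes already consumed by earlier form fields. A sketch of that decision (the fixed 128 KiB cutoff is an assumed stand-in for the configured globalStorageClass.ShouldInline check):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const inlineThreshold = 128 << 10 // assumed stand-in for the configured inline limit

// maybeInline fully buffers reader when the estimated remaining size is
// small enough, returning the buffered reader and its exact length;
// otherwise it returns the stream untouched with size unknown (-1).
func maybeInline(reader io.Reader, contentLength, headerLen int64) (io.Reader, int64, error) {
	possible := contentLength - headerLen // bytes the file part can still occupy
	if possible > inlineThreshold {
		return reader, -1, nil
	}
	var b bytes.Buffer
	n, err := io.Copy(&b, reader)
	if err != nil {
		return nil, -1, err
	}
	return &b, n, nil
}

func main() {
	r, size, err := maybeInline(strings.NewReader("tiny object"), 200, 150)
	fmt.Println(size, err) // 11 <nil>: exact size known once buffered
	_ = r
}
```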
@@ -1138,11 +1186,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
-	hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize)
+	clientETag, err := etag.FromContentMD5(formValues)
+	if err != nil {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
+		return
+	}
+
+	var forceMD5 []byte
+	// Optimization: If SSE-KMS and SSE-C did not request Content-Md5. Use uuid as etag. Optionally enable this also
+	// for server that is started with `--no-compat`.
+	kind, _ := crypto.IsRequested(formValues)
+	if !etag.ContentMD5Requested(formValues) && (kind == crypto.SSEC || kind == crypto.S3KMS || !globalServerCtxt.StrictS3Compat) {
+		forceMD5 = mustGetUUIDBytes()
+	}
+
+	hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
+		Size:       actualSize,
+		MD5Hex:     clientETag.String(),
+		SHA256Hex:  "",
+		ActualSize: actualSize,
+		DisableMD5: false,
+		ForceMD5:   forceMD5,
+	})
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
 	if checksum != nil && checksum.Valid() {
 		if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
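The rewritten reader setup threads the client's Content-MD5 (if any) into the hash reader and, when no MD5 was requested and the object is SSE-C/SSE-KMS encrypted (or strict S3 compatibility is off), pre-seeds a random UUID as the ETag instead of paying for an MD5 pass over the body. A sketch of just that decision, under simplified types (the sseKind enum and forcedETag helper are invented stand-ins for MinIO's crypto kinds and mustGetUUIDBytes):

```go
package main

import (
	"crypto/rand"
	"fmt"
)

type sseKind int

const (
	sseNone sseKind = iota
	sseC
	sseKMS
)

// forcedETag returns random bytes to use as the object's ETag when the
// client sent no Content-MD5 and an MD5 digest would be wasted work:
// encrypted objects don't expose a content MD5 anyway, and non-strict
// deployments opt out of MD5-as-ETag entirely.
func forcedETag(md5Requested bool, kind sseKind, strictS3Compat bool) []byte {
	if md5Requested {
		return nil // honor the client's checksum; compute and verify real MD5
	}
	if kind == sseC || kind == sseKMS || !strictS3Compat {
		uuid := make([]byte, 16)
		if _, err := rand.Read(uuid); err != nil {
			panic(err) // crypto/rand failure is unrecoverable
		}
		return uuid
	}
	return nil
}

func main() {
	fmt.Printf("forced etag: %x\n", forcedETag(false, sseKMS, true))
	fmt.Println(forcedETag(true, sseKMS, true)) // nil -> compute real MD5
}
```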
@@ -1199,9 +1269,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 		return
 	}
-	opts.WantChecksum = checksum
+
+	fanOutOpts := fanOutOptions{Checksum: checksum}
 
 	if crypto.Requested(formValues) {
 		if crypto.SSECopy.IsRequested(r.Header) {
 			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
@@ -1246,8 +1316,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
 
+		wantSize := int64(-1)
+		if actualSize >= 0 {
+			info := ObjectInfo{Size: actualSize}
+			wantSize = info.EncryptedSize()
+		}
+
 		// do not try to verify encrypted content/
-		hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1)
+		hashReader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
@@ -1258,6 +1335,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				return
 			}
 		}
+		opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
 		pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1327,7 +1405,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				Key:   objInfo.Name,
 				Error: errs[i].Error(),
 			})
-
 			eventArgsList = append(eventArgsList, eventArgs{
 				EventName:  event.ObjectCreatedPost,
 				BucketName: objInfo.Bucket,
@@ -1340,22 +1417,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			continue
 		}
 
-		asize, err := objInfo.GetActualSize()
-		if err != nil {
-			asize = objInfo.Size
-		}
-
-		globalCacheConfig.Set(&cache.ObjectInfo{
-			Key:          objInfo.Name,
-			Bucket:       objInfo.Bucket,
-			ETag:         getDecryptedETag(formValues, objInfo, false),
-			ModTime:      objInfo.ModTime,
-			Expires:      objInfo.Expires.UTC().Format(http.TimeFormat),
-			CacheControl: objInfo.CacheControl,
-			Metadata:     cleanReservedKeys(objInfo.UserDefined),
-			Size:         asize,
-		})
-
 		fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{
 			Key:  objInfo.Name,
 			ETag: getDecryptedETag(formValues, objInfo, false),
@@ -1415,6 +1476,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
+	if formValues.Get(postPolicyBucketTagging) != "" {
+		tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
+		if err != nil {
+			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
+			return
+		}
+		tagsStr := tags.String()
+		opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
+	} else {
+		// avoid user set an invalid tag using `X-Amz-Tagging`
+		delete(opts.UserDefined, xhttp.AmzObjectTagging)
+	}
+
 	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
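POST-policy uploads now honor a tagging form field: its value is parsed as the same XML document PutObjectTagging accepts, and anything a client smuggled in via X-Amz-Tagging is dropped otherwise. A standalone sketch using the minio-go tags package the hunk calls into (the literal XML is just sample input):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// XML shape accepted by PutObjectTagging, delivered here as a form value.
	formValue := `<Tagging><TagSet><Tag><Key>env</Key><Value>prod</Value></Tag></TagSet></Tagging>`

	t, err := tags.ParseObjectXML(strings.NewReader(formValue))
	if err != nil {
		fmt.Println("malformed POST request:", err)
		return
	}
	// String() re-encodes the set in the "k=v&k2=v2" header form that the
	// handler stores under X-Amz-Tagging in the object's user metadata.
	fmt.Println(t.String()) // env=prod
}
```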
@@ -1437,22 +1511,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		w.Header().Set(xhttp.Location, obj)
 	}
 
-	asize, err := objInfo.GetActualSize()
-	if err != nil {
-		asize = objInfo.Size
-	}
-
-	defer globalCacheConfig.Set(&cache.ObjectInfo{
-		Key:          objInfo.Name,
-		Bucket:       objInfo.Bucket,
-		ETag:         etag,
-		ModTime:      objInfo.ModTime,
-		Expires:      objInfo.ExpiresStr(),
-		CacheControl: objInfo.CacheControl,
-		Metadata:     cleanReservedKeys(objInfo.UserDefined),
-		Size:         asize,
-	})
-
 	// Notify object created event.
 	defer sendEvent(eventArgs{
 		EventName: event.ObjectCreatedPost,
@@ -1661,7 +1719,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
 			return
 		}
-	case rcfg.HasActiveRules("", true):
+	case rcfg != nil && rcfg.HasActiveRules("", true):
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
 		return
 	}
@@ -1693,17 +1751,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 
 	if globalDNSConfig != nil {
 		if err := globalDNSConfig.Delete(bucket); err != nil {
-			logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
+			dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 			return
 		}
 	}
 
 	globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
-	globalReplicationPool.deleteResyncMetadata(ctx, bucket)
+	globalReplicationPool.Get().deleteResyncMetadata(ctx, bucket)
 
 	// Call site replication hook.
-	logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
+	replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
 
 	// Write success response.
 	writeSuccessNoContent(w)
@@ -1749,6 +1807,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		return
 	}
 
+	// Audit log tags.
+	reqInfo := logger.GetReqInfo(ctx)
+	reqInfo.SetTags("retention", config.String())
+
 	configData, err := xml.Marshal(config)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1776,7 +1838,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 	// We encode the xml bytes as base64 to ensure there are no encoding
 	// errors.
 	cfgStr := base64.StdEncoding.EncodeToString(configData)
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:             madmin.SRBucketMetaTypeObjectLockConfig,
 		Bucket:           bucket,
 		ObjectLockConfig: &cfgStr,
@@ -1880,7 +1942,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
 	// We encode the xml bytes as base64 to ensure there are no encoding
 	// errors.
 	cfgStr := base64.StdEncoding.EncodeToString(configData)
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:   madmin.SRBucketMetaTypeTags,
 		Bucket: bucket,
 		Tags:   &cfgStr,
@@ -1956,7 +2018,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
 		return
 	}
 
-	logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+	replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
 		Type:      madmin.SRBucketMetaTypeTags,
 		Bucket:    bucket,
 		UpdatedAt: updatedAt,
@@ -32,7 +32,7 @@ import (
 
 // Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestRemoveBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}})
 }
 
 func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
 
 // Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestGetBucketLocationHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}})
 }
 
 func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 		if errorResponse.Code != testCase.errorResponse.Code {
 			t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
 		}
-
 	}
 
 	// Test for Anonymous/unsigned http request.
@@ -220,7 +219,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
 
 // Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestHeadBucketHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}})
 }
 
 func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
@@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 		if recV2.Code != testCase.expectedRespStatus {
 			t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
 		}
-
 	}
 
 	// Test for Anonymous/unsigned http request.
@@ -322,7 +320,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
 
 // Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
 func TestListMultipartUploadsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}})
 }
 
 // testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
@@ -558,7 +556,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 
 // Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
 func TestListBucketsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}})
 }
 
 // testListBucketsHandler - Tests validate listing of buckets.
@@ -649,7 +647,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
 
 // Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
 func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
-	ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"})
+	ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}})
 }
 
 func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
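Every test wrapper above migrates from positional arguments to a single ExecObjectLayerAPITestArgs struct, so new options can be added later without touching dozens of call sites. A minimal sketch of the pattern (names mirror the diff, but the helper body is invented):

```go
package main

import "fmt"

// execArgs mimics ExecObjectLayerAPITestArgs: a struct literal with named
// fields replaces a growing positional parameter list.
type execArgs struct {
	name      string
	endpoints []string
	// new knobs can be appended here without breaking existing callers
}

func execTest(args execArgs) {
	fmt.Printf("running %s against %v\n", args.name, args.endpoints)
}

func main() {
	execTest(execArgs{name: "RemoveBucket", endpoints: []string{"RemoveBucket"}})
	execTest(execArgs{name: "Lifecycle", endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle"}})
}
```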
@@ -17,12 +17,16 @@
 
 package cmd
 
-import "github.com/minio/minio/internal/bucket/lifecycle"
+import (
+	"strconv"
+
+	"github.com/minio/minio/internal/bucket/lifecycle"
+)
 
 //go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
 type lcEventSrc uint8
 
-//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
+//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins
 const (
 	lcEventSrc_None lcEventSrc = iota
 	lcEventSrc_Heal
@@ -43,7 +47,7 @@ type lcAuditEvent struct {
 	source lcEventSrc
 }
 
-func (lae lcAuditEvent) Tags() map[string]interface{} {
+func (lae lcAuditEvent) Tags() map[string]string {
 	event := lae.Event
 	src := lae.source
 	const (
@@ -55,7 +59,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
 		ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions"
 		ilmNoncurrentDays          = "ilm-noncurrent-days"
 	)
-	tags := make(map[string]interface{}, 5)
+	tags := make(map[string]string, 5)
 	if src > lcEventSrc_None {
 		tags[ilmSrc] = src.String()
 	}
@@ -63,7 +67,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
 	tags[ilmRuleID] = event.RuleID
 
 	if !event.Due.IsZero() {
-		tags[ilmDue] = event.Due
+		tags[ilmDue] = event.Due.Format(iso8601Format)
 	}
 
 	// rule with Transition/NoncurrentVersionTransition in effect
@@ -73,10 +77,10 @@ func (lae lcAuditEvent) Tags() map[string]interface{} {
 
 	// rule with NewernoncurrentVersions in effect
 	if event.NewerNoncurrentVersions > 0 {
-		tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions
+		tags[ilmNewerNoncurrentVersions] = strconv.Itoa(event.NewerNoncurrentVersions)
 	}
 	if event.NoncurrentDays > 0 {
-		tags[ilmNoncurrentDays] = event.NoncurrentDays
+		tags[ilmNoncurrentDays] = strconv.Itoa(event.NoncurrentDays)
 	}
 	return tags
 }
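Tightening Tags() from map[string]interface{} to map[string]string forces every audit value to be serialized explicitly at the source: times via Format, counters via strconv.Itoa, rather than leaving the encoding to downstream reflection. A sketch of the converted shape (the tag keys follow the diff; the layout constant is assumed to match MinIO's iso8601Format):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

const iso8601Format = "2006-01-02T15:04:05.000Z" // assumed layout, matching the style MinIO uses

// auditTags renders every value as a string up front, so the audit
// pipeline never has to guess how to encode an interface{} value.
func auditTags(due time.Time, newerNoncurrentVersions, noncurrentDays int) map[string]string {
	tags := make(map[string]string, 3)
	if !due.IsZero() {
		tags["ilm-due"] = due.UTC().Format(iso8601Format)
	}
	if newerNoncurrentVersions > 0 {
		tags["ilm-newer-noncurrent-versions"] = strconv.Itoa(newerNoncurrentVersions)
	}
	if noncurrentDays > 0 {
		tags["ilm-noncurrent-days"] = strconv.Itoa(noncurrentDays)
	}
	return tags
}

func main() {
	fmt.Println(auditTags(time.Now().Add(24*time.Hour), 5, 30))
}
```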
@@ -19,7 +19,6 @@ package cmd
 
 import (
 	"encoding/xml"
-	"io"
 	"net/http"
 	"strconv"
 	"time"
@@ -28,7 +27,7 @@ import (
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/mux"
-	"github.com/minio/pkg/v2/policy"
+	"github.com/minio/pkg/v3/policy"
 )
 
 const (
@@ -53,7 +52,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	bucket := vars["bucket"]
 
 	// PutBucketLifecycle always needs a Content-Md5
-	if _, ok := r.Header[xhttp.ContentMD5]; !ok {
+	if !validateLengthAndChecksum(r) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
 		return
 	}
@@ -64,19 +63,20 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
 	}
 
 	// Check if bucket exists.
-	if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+	rcfg, err := globalBucketObjectLockSys.Get(bucket)
+	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
-	bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(io.LimitReader(r.Body, r.ContentLength))
+	bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(r.Body)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
 
 	// Validate the received bucket policy document
-	if err = bucketLifecycle.Validate(); err != nil {
+	if err = bucketLifecycle.Validate(rcfg); err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
 		return
 	}
|
||||
|
||||
// Test S3 Bucket lifecycle APIs with wrong credentials
|
||||
func TestBucketLifecycleWrongCredentials(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
|
||||
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlersWrongCredentials, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
|
||||
}
|
||||
|
||||
// Test for authentication
|
||||
@@ -145,7 +145,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType,
|
||||
|
||||
// Test S3 Bucket lifecycle APIs
|
||||
func TestBucketLifecycle(t *testing.T) {
|
||||
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
|
||||
ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlers, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}})
|
||||
}
|
||||
|
||||
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.