Compare commits

...

103 Commits

Author SHA1 Message Date
Andreas Auernhammer
01cb705c36 crypto: add support for KMS key versions
This commit adds support for KMS master key versions.
Now, MinIO stores any key version information returned by the
KMS as part of the object metadata. The key version identifies
a particular master key within a master key ring. When encrypting/
generating a DEK, MinIO has to remember the key version - similar to
the key name. When decrypting a DEK, MinIO sends the key version to
the KMS such that the KMS can identify the exact key version that
should be used to decrypt the object.

Existing objects don't have a key version. Hence, this field will
be empty.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2025-05-05 22:35:43 +02:00
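
The flow described above is straightforward to sketch. Below is a minimal Go illustration, assuming a hypothetical KMS client interface; the type, method, and metadata-key names are placeholders, not MinIO's actual internals.

```go
package main

import "fmt"

// KMS is a hypothetical client interface; the real minio/kms-go SDK differs.
type KMS interface {
	// GenerateKey returns a sealed DEK plus the master key version that sealed it.
	GenerateKey(keyName string) (sealedDEK []byte, keyVersion string, err error)
	// DecryptKey needs the exact version to pick the right key in the key ring.
	DecryptKey(keyName, keyVersion string, sealedDEK []byte) ([]byte, error)
}

// Illustrative metadata keys; existing objects simply lack the version entry.
const (
	metaKMSKeyName    = "x-kms-key-name"
	metaKMSKeyVersion = "x-kms-key-version"
)

func sealDEK(kms KMS, keyName string, meta map[string]string) ([]byte, error) {
	sealed, version, err := kms.GenerateKey(keyName)
	if err != nil {
		return nil, err
	}
	meta[metaKMSKeyName] = keyName
	meta[metaKMSKeyVersion] = version // remembered like the key name
	return sealed, nil
}

func unsealDEK(kms KMS, meta map[string]string, sealed []byte) ([]byte, error) {
	// Pre-existing objects have no version; the empty string is sent as-is.
	return kms.DecryptKey(meta[metaKMSKeyName], meta[metaKMSKeyVersion], sealed)
}

func main() { fmt.Println("sketch only; wire a real KMS client into the interface") }
```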
jiuker
9ea14c88d8 cleanup: use NewWithOptions to replace the deprecated one (#21243) 2025-04-29 08:35:51 -07:00
jiuker
30a1261c22 fix: track object and bucket for expireAll (#21241) 2025-04-27 21:19:38 -07:00
Matt Lloyd
0e017ab071 feat: support nats nkey seed auth (#21231) 2025-04-26 21:30:57 -07:00
Harshavardhana
f14198e3dc update with newer pkger release 2025-04-26 17:44:22 -07:00
Burkov Egor
93c389dbc9 typo: return actual error from RemoveRemoteTargetsForEndpoint (#21238) 2025-04-26 01:43:10 -07:00
jiuker
ddd9a84cd7 allow concurrent aborts on active uploadParts() (#21229)
allow aborting active uploads in progress; however, fail these uploads
subsequently during the commit phase and return appropriate errors
2025-04-24 22:41:04 -07:00
Celis
b7540169a2 Add documentation for replication_max_lrg_workers (#21236) 2025-04-24 16:34:26 -07:00
Klaus Post
f01374950f Use go mod tool to install tools for go generate (#21232)
Use go tool for generators

* Use go.mod tool section
* Install tools with go generate
* Update dependencies
* Remove madmin fork.
2025-04-24 16:34:11 -07:00
Taran Pelkey
18aceae620 Fix nil dereference in adding service account (#21235)
Fixes #21234
2025-04-24 11:14:00 -07:00
Andreas Auernhammer
427826abc5 update minio/kms-go/kms SDK (#21233)
Signed-off-by: Andreas Auernhammer <github@aead.dev>
2025-04-24 08:33:57 -07:00
Harshavardhana
2780778c10 Revert "Fix: Change TTFB metric type to histogram (#20999)"
This reverts commit 8d223e07fb.
2025-04-23 13:56:18 -07:00
Shubhendu
2d8ba15b9e Correct spelling (#21225) 2025-04-23 08:13:23 -07:00
Minio Trusted
bd6dd55e7f Update yaml files to latest version RELEASE.2025-04-22T22-12-26Z 2025-04-22 22:34:07 +00:00
Matt Lloyd
0d7408fc99 feat: support nats tls handshake first (#21008) 2025-04-22 15:12:26 -07:00
jiuker
864f80e226 fix: batch expiry job doesn't report delete marker in batch-status (#21183) 2025-04-22 04:16:32 -07:00
Harshavardhana
0379d6a37f fix: permissions for docker-compose 2025-04-21 09:24:31 -07:00
Harshavardhana
43aa8e4259 support autogenerated credentials for KMS_SECRET_KEY properly (#21223)
we had a chicken-and-egg problem with this feature: even
when used with KES, credentials generation would not work
in the correct sequence, causing setup/deployment
disruptions.

This PR streamlines all of this properly to ensure that
this functionality works as advertised.
2025-04-21 09:23:51 -07:00
Harshavardhana
e2ed696619 fix: docker-compose link since latest release 2025-04-20 10:05:30 -07:00
Klaus Post
fb3f67a597 Fix shared error buffer (#21203)
v.cancelFn(RemoteErr(m.Payload)) would use an already returned buffer.

Simplify code a bit as well by returning on errors.
2025-04-18 02:10:55 -07:00
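
The underlying bug pattern is generic Go: building an error from a pooled buffer and releasing the buffer while the error still aliases it. A hedged sketch of the fix, with illustrative names (copy the payload before returning the buffer):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

// remoteErr wraps a message received over the wire.
type remoteErr struct{ msg []byte }

func (e *remoteErr) Error() string { return string(e.msg) }

// deliver hands the payload to a cancel callback. The buggy version passed
// buf.Bytes() directly and then returned buf to the pool, so the error kept
// aliasing memory that the pool could hand to the next message.
func deliver(cancelFn func(error), buf *bytes.Buffer) {
	msg := append([]byte(nil), buf.Bytes()...) // fix: copy before releasing
	bufPool.Put(buf)
	cancelFn(&remoteErr{msg: msg})
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.WriteString("remote failure")
	deliver(func(err error) { fmt.Println(err) }, buf)
}
```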
dependabot[bot]
7ee75368e0 build(deps): bump github.com/nats-io/nats-server/v2 from 2.9.23 to 2.10.27 (#21191)
build(deps): bump github.com/nats-io/nats-server/v2

Bumps [github.com/nats-io/nats-server/v2](https://github.com/nats-io/nats-server) from 2.9.23 to 2.10.27.
- [Release notes](https://github.com/nats-io/nats-server/releases)
- [Changelog](https://github.com/nats-io/nats-server/blob/main/.goreleaser.yml)
- [Commits](https://github.com/nats-io/nats-server/compare/v2.9.23...v2.10.27)

---
updated-dependencies:
- dependency-name: github.com/nats-io/nats-server/v2
  dependency-version: 2.10.27
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-17 04:45:51 -07:00
dependabot[bot]
1d6478b8ae build(deps): bump golang.org/x/net from 0.34.0 to 0.38.0 in /docs/debugging/s3-verify (#21199)
build(deps): bump golang.org/x/net in /docs/debugging/s3-verify

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.34.0 to 0.38.0.
- [Commits](https://github.com/golang/net/compare/v0.34.0...v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.38.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-17 04:45:33 -07:00
dependabot[bot]
0581001b6f build(deps): bump golang.org/x/net from 0.37.0 to 0.38.0 (#21200)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.37.0 to 0.38.0.
- [Commits](https://github.com/golang/net/compare/v0.37.0...v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.38.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-17 04:45:15 -07:00
dependabot[bot]
479303e7e9 build(deps): bump golang.org/x/crypto from 0.32.0 to 0.35.0 in /docs/debugging/inspect (#21192) 2025-04-16 14:54:16 -07:00
Burkov Egor
89aec6804b typo: fix return of checkDiskFatalErrs (#21121) 2025-04-16 08:20:41 -07:00
Taran Pelkey
eb33bc6bf5 Add New Accesskey Info and OpenID Accesskey List API endpoints (#21097) 2025-04-16 00:34:24 -07:00
dependabot[bot]
3310f740f0 build(deps): bump golang.org/x/crypto from 0.32.0 to 0.35.0 in /docs/debugging/s3-verify (#21185)
build(deps): bump golang.org/x/crypto in /docs/debugging/s3-verify

Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.32.0 to 0.35.0.
- [Commits](https://github.com/golang/crypto/compare/v0.32.0...v0.35.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.35.0
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-15 07:00:14 -07:00
Burkov Egor
4595293ca0 typo: fix error msg for decoding XL headers (#21120) 2025-04-10 08:55:43 -07:00
Klaus Post
02a67cbd2a Fix buffered streams missing final entries (#21122)
On buffered streams, the final entries could be missing if many
are delivered as the stream ends.

Fixes end-of-stream cancellation dropping the final entries by
canceling with the StreamEOF error instead.
2025-04-10 08:29:19 -07:00
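
A sketch of the failure mode and fix, under the assumption that entries travel over a buffered channel: returning as soon as cancellation fires drops whatever is still buffered, while a StreamEOF sentinel lets the consumer drain to the close. Names here are illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

var errStreamEOF = errors.New("stream EOF") // sentinel: end of stream, not a failure

// drainAll keeps reading buffered entries after the producer signals EOF,
// instead of returning as soon as a cancellation fires.
func drainAll(entries <-chan string, done <-chan error) ([]string, error) {
	var out []string
	for {
		select {
		case e, ok := <-entries:
			if !ok {
				return out, nil
			}
			out = append(out, e)
		case err := <-done:
			if errors.Is(err, errStreamEOF) {
				// Drain whatever is still buffered before returning.
				for e := range entries {
					out = append(out, e)
				}
				return out, nil
			}
			return out, err
		}
	}
}

func main() {
	entries := make(chan string, 4)
	done := make(chan error, 1)
	entries <- "a"
	entries <- "b"
	close(entries)
	done <- errStreamEOF
	got, err := drainAll(entries, done)
	fmt.Println(got, err)
}
```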
Harshavardhana
2b34e5b9ae move to go1.24 (#21114) 2025-04-09 07:28:39 -07:00
Minio Trusted
a6258668a6 Update yaml files to latest version RELEASE.2025-04-08T15-41-24Z 2025-04-08 19:37:51 +00:00
Krishnan Parthasarathi
d0cada583f ilm: Expect objects with only free versions when scanning (#21112) 2025-04-08 08:41:24 -07:00
Harshavardhana
0bd8f06b62 fix: healing to list, purge dangling objects (#621)
in a specific corner case, when you only have dangling
objects with a single shard left over, we end up in a situation
where healing is unable to list this dangling object for
purging, because the listing logic expected only
`len(disks)/2+1` drives. With that expectation, the drive where
this object is present may not be part of the expected disk
list, causing the object to never be listed and to be ignored
in perpetuity.

change the logic such that HealObjects() is able to
listAndHeal() per set properly on all of its drives, since
there is really no other way to do this cleanly. However,
instead of "listing" on all erasure sets simultaneously, we
list on 3 at a time, so in a large enough cluster this is
fairly staggered.
2025-04-04 06:49:12 -07:00
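
The staggering described above maps to a standard semaphore pattern; a minimal sketch (the set and listAndHeal types are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

const maxConcurrentSetListings = 3 // stagger: list on 3 erasure sets at a time

func healAllSets(sets []int, listAndHeal func(set int)) {
	sem := make(chan struct{}, maxConcurrentSetListings)
	var wg sync.WaitGroup
	for _, set := range sets {
		wg.Add(1)
		sem <- struct{}{} // blocks once 3 listings are in flight
		go func(set int) {
			defer wg.Done()
			defer func() { <-sem }()
			listAndHeal(set)
		}(set)
	}
	wg.Wait()
}

func main() {
	healAllSets([]int{0, 1, 2, 3, 4, 5}, func(set int) {
		fmt.Println("listing and healing erasure set", set)
	})
}
```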
Harshavardhana
6640be3bed fix: listParts crash when partNumberMarker is expected (#620)
fixes https://github.com/minio/minio/issues/21098
2025-04-04 06:44:38 -07:00
Anis Eleuch
eafeb27e90 decom: Ignore orphan delete markers in verification stage (#21106)
To make sure that no objects were skipped for any reason,
decommissioning does a second listing phase to check whether
some objects still need to be decommissioned. However, the code
forgot to skip orphan delete markers, which the decommissioning
code already skips elsewhere.

Make the code ignore delete markers in the verification phase.

Co-authored-by: Anis Eleuch <anis@min.io>
2025-04-03 15:07:24 -07:00
Minio Trusted
f2c9eb0f79 Update yaml files to latest version RELEASE.2025-04-03T14-56-28Z 2025-04-03 18:57:40 +00:00
爱折腾的小竹同学
f2619d1f62 Fix description error in README (#21099)
There is a prefix in the JSON, but not in the equivalent command line. Although the role of the prefix has been explained in the previous example, I think it should be supplemented here.
2025-04-03 07:56:28 -07:00
Harshavardhana
8c70975283 make sure to validate signature unsigned trailer stream (#21103)
This is a security incident fix. It would seem that since
the implementation of the unsigned payload trailer on PUTs,
we have not been validating the signature of the incoming
request.

The signature could be invalid and was being ignored entirely.
This in turn allowed any arbitrary secret key to upload objects,
given a user with "WRITE" permissions on the bucket: since
access keys are, in general, public information, any arbitrary
client could send a fake request to MinIO on behalf of such a
user, as the signature under the Authorization: header was
totally ignored.

A test has been added to cover this scenario and fail
appropriately.
2025-04-03 07:55:52 -07:00
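
The key point is that STREAMING-UNSIGNED-PAYLOAD-TRAILER leaves only the payload unsigned; the Authorization header still carries a SigV4 signature over the headers, and that signature must be verified. A sketch of the standard SigV4 key derivation and comparison (the canonical-request assembly is elided):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey derives the SigV4 signing key (standard AWS algorithm).
func signingKey(secret, date, region, service string) []byte {
	k := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	k = hmacSHA256(k, []byte(region))
	k = hmacSHA256(k, []byte(service))
	return hmacSHA256(k, []byte("aws4_request"))
}

// verify recomputes the signature over the string-to-sign and compares it with
// the value from the Authorization header. For an unsigned-trailer upload the
// hashed payload in the canonical request is the literal constant
// "STREAMING-UNSIGNED-PAYLOAD-TRAILER", so the headers are still signed even
// though the body is not; skipping this check accepts any secret key.
func verify(secret, stringToSign, date, region, service, gotSignature string) bool {
	want := hex.EncodeToString(hmacSHA256(signingKey(secret, date, region, service), []byte(stringToSign)))
	return hmac.Equal([]byte(want), []byte(gotSignature))
}

func main() {
	sts := "AWS4-HMAC-SHA256\n20250401T150402Z\n20250401/us-east-1/s3/aws4_request\n<hash-of-canonical-request>"
	ok := verify("minioadmin", sts, "20250401", "us-east-1", "s3", "deadbeef")
	fmt.Println("signature valid:", ok)
}
```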
Krishnan Parthasarathi
01447d2438 Fix evaluation of NewerNoncurrentVersions (#21096)
- Move VersionPurgeStatus into replication package
- ilm: Evaluate policy w/ obj retention/replication
- lifecycle: Use Evaluator to enforce ILM in scanner
- Unit tests covering ILM, replication and retention
- Simplify NewEvaluator constructor
2025-04-02 23:45:06 -07:00
Shubhendu
07f31e574c Try reconnect IAM systems if failed initially (#20333)
Fixes: https://github.com/minio/minio/issues/20118

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2025-04-02 10:29:33 -07:00
iamsagar99
8d223e07fb Fix: Change TTFB metric type to histogram (#20999) 2025-04-01 22:48:58 -07:00
Harshavardhana
4041a8727c start publishing latest-cicd images 2025-04-01 20:53:54 -07:00
Klaus Post
5f243fde9a Fix anonymous unsigned trailing headers (#21095)
Do not fail on anonymous requests with trailing headers.

Fixes #21005

With modified minio-go (will send PR):

```
<DEBUG> PUT /tbb/mc.exe HTTP/1.1
Host: 127.0.0.1:9001
User-Agent: MinIO (windows; amd64) minio-go/v7.0.90 mc/DEVELOPMENT.GOGET
Content-Length: 44301288
Accept-Encoding: zstd,gzip
Content-Encoding: aws-chunked
Content-Type: application/x-msdownload
X-Amz-Content-Sha256: STREAMING-UNSIGNED-PAYLOAD-TRAILER
X-Amz-Date: 20250401T150402Z
X-Amz-Decoded-Content-Length: 44295168
X-Amz-Trailer: x-amz-checksum-crc32

mc: <DEBUG> HTTP/1.1 200 OK
Content-Length: 0
Accept-Ranges: bytes
Date: Tue, 01 Apr 2025 15:04:02 GMT
Etag: "46273a30f232dc015ead1c0da8925c98"
Server: MinIO
Strict-Transport-Security: max-age=31536000; includeSubDomains
Vary: Origin
Vary: Accept-Encoding
X-Amz-Checksum-Crc32: wElc/A==
X-Amz-Id-2: 7987905dee74cdeb212432486a178e511309594cee7cb75f892cd53e35f09ea4
X-Amz-Request-Id: 18323A0F322B41C8
X-Content-Type-Options: nosniff
X-Ratelimit-Limit: 2478
X-Ratelimit-Remaining: 2478
X-Xss-Protection: 1; mode=block
```

Tested on multipart uploads as well.
2025-04-01 11:23:27 -07:00
Burkov Egor
a0e3f1cc18 internal: add handling of KVS config parse (#21079) 2025-04-01 08:28:26 -07:00
Name
b1bc641105 chore(all): replace map key deletion loop with clear() (#21082) 2025-04-01 08:28:06 -07:00
jiuker
e0c8738230 fix: token is invalid for admin heal when minio is distErasure on windows (#21092) 2025-04-01 08:21:33 -07:00
alingse
9aa24b1920 fix call toAPIErrorCode with a nil value error after check another err (#21083)
if the check `lerr != nil` succeeds, the code returns `toAPIErrorCode(nil)`

it should return `toAPIErrorCode(lerr)` instead
2025-03-31 13:31:15 -07:00
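
The slip is a classic shadowed-argument bug; a minimal sketch, with toAPIErrorCode standing in for the real helper:

```go
package main

import (
	"errors"
	"fmt"
)

type apiErrorCode string

// toAPIErrorCode maps an internal error to an API error code. Passing nil
// yields "None", which silently hides the failure from the client.
func toAPIErrorCode(err error) apiErrorCode {
	if err == nil {
		return "None"
	}
	return apiErrorCode(err.Error())
}

func handleListing(lerr error) apiErrorCode {
	if lerr != nil {
		// Buggy version: return toAPIErrorCode(nil), checking lerr but then dropping it.
		return toAPIErrorCode(lerr) // fixed: pass the error that was checked
	}
	return "None"
}

func main() { fmt.Println(handleListing(errors.New("listing failed"))) }
```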
Taran Pelkey
53d40e41bc Add new API endpoint to revoke STS tokens (#21072) 2025-03-31 11:51:24 -07:00
Taran Pelkey
e88d494775 Migrate golanglint-ci config to V2 (#21081) 2025-03-29 17:56:02 -07:00
dependabot[bot]
b67f0cf721 build(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 (#21056)
Bumps [github.com/golang-jwt/jwt/v4](https://github.com/golang-jwt/jwt) from 4.5.1 to 4.5.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v4.5.1...v4.5.2)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v4
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-23 08:18:21 -07:00
Alexander Kalaj
46922c71b7 Updating Prom queries to include tilde needed to work (#21054) 2025-03-22 08:22:29 -07:00
dependabot[bot]
670edb4fcf build(deps): bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 (#21055)
Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.1 to 5.2.2.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.1...v5.2.2)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v5
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-22 08:21:04 -07:00
itsJohnySmith
42d4ab2a0a fix(templates): replace dash with underscore (#19566) 2025-03-14 13:01:11 -07:00
Harshavardhana
5e2eb372bf update dependencies for CVE fix x/net 2025-03-12 22:29:51 -07:00
Minio Trusted
cccb37a5ac Update yaml files to latest version RELEASE.2025-03-12T18-04-18Z 2025-03-12 18:22:31 +00:00
Anis Eleuch
dbf31af6cb decom: Ignore not found buckets (#509) (#21023)
When decommissioning is started, the list of buckets to decommission is
calculated; however, a bucket can be removed before decommissioning reaches
it. This caused an infinite loop of listing errors complaining about
the non-existence of the bucket. This commit ignores
errVolumeNotFound to skip the not-found bucket.
2025-03-12 11:04:18 -07:00
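
A small sketch of the skip: treat a volume-not-found error as "bucket already gone, move on" instead of retrying forever. errVolumeNotFound mirrors the sentinel named in the commit; the rest is illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

var errVolumeNotFound = errors.New("volume not found")

func decommissionBuckets(buckets []string, decom func(string) error) error {
	for _, b := range buckets {
		if err := decom(b); err != nil {
			if errors.Is(err, errVolumeNotFound) {
				continue // bucket was deleted after the list was computed; skip it
			}
			return err // real failure: stop instead of looping on it
		}
	}
	return nil
}

func main() {
	err := decommissionBuckets([]string{"a", "gone"}, func(b string) error {
		if b == "gone" {
			return errVolumeNotFound
		}
		fmt.Println("decommissioned", b)
		return nil
	})
	fmt.Println("done:", err)
}
```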
Klaus Post
93e40c3ab4 Disable unstable test (#20996)
Disable unstable test in vendored package. Only used for s3 select.
2025-03-12 10:26:50 -07:00
Aditya Manthramurthy
8aa0e9ff7c Update ssh and jws libs for fixed CVEs (#21017)
- https://pkg.go.dev/vuln/GO-2025-3488
- https://pkg.go.dev/vuln/GO-2025-3487
2025-03-12 08:16:19 -07:00
Aditya Manthramurthy
bbd6f18afb Update typos config (#21018) 2025-03-11 08:44:54 -07:00
Harshavardhana
2a3acc4f24 drive heal if we have enough success, do not error setList() (#516) 2025-03-10 19:57:24 -07:00
Klaus Post
11507d46da Enforce a bucket limit of 100 to v2 metrics calls (#20761)
Enforce a bucket count limit on metrics for v2 calls.

If people hit this limit, they should move to v3, as certain calls explode with high bucket count.

Reviewers: This *should* only affect v2 calls, but the complexity is overwhelming.
2025-02-28 11:33:08 -08:00
Minio Trusted
f9c62dea55 Update yaml files to latest version RELEASE.2025-02-28T09-55-16Z 2025-02-28 18:16:28 +00:00
Klaus Post
8c2c92f7af Fix healing probability for skipped folders (#20988)
We must update the heal probability when selectively skipping folders.
2025-02-28 01:55:16 -08:00
Aditya Manthramurthy
4c71f1b4ec fix: SFTP auth bypass with no pub key in LDAP (#20986)
If a user attempted to authenticate with a key but did not have an
sshpubkey attribute in LDAP, the server allowed the connection,
meaning it trusted the key without reason. This is now fixed,
and a test has been added for validation.
2025-02-27 10:43:32 -08:00
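
The fix reduces to: no stored key, no key-based trust. A hedged sketch (the sshpubkey attribute name comes from the commit; the lookup and comparison are simplified illustrations, and a real server would compare parsed SSH keys):

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// authorizedKeyFromLDAP returns the user's sshpubkey attribute, if any.
func authorizedKeyFromLDAP(user string) ([]byte, bool) {
	dir := map[string][]byte{"alice": []byte("ssh-ed25519 AAAA... alice")}
	key, ok := dir[user]
	return key, ok
}

func authenticate(user string, offeredKey []byte) error {
	stored, ok := authorizedKeyFromLDAP(user)
	if !ok {
		// Buggy behavior: fall through and accept. Fixed: no sshpubkey
		// attribute in LDAP means there is nothing to trust the key against.
		return errors.New("no public key registered for user")
	}
	if !bytes.Equal(stored, offeredKey) {
		return errors.New("public key mismatch")
	}
	return nil
}

func main() {
	fmt.Println(authenticate("bob", []byte("ssh-ed25519 AAAA... bob")))
}
```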
Poorna
6cd8a372cb replication: set checksum type correctly (#20985)
Fixes: #20978
2025-02-26 15:17:28 -08:00
Anis Eleuch
953a3e2bbd check for errors on bitrotWriter Close() (#20982) 2025-02-26 11:26:13 -08:00
Mark Theunissen
7cc0c69228 Allow disabling of all X-Forwarded-For header processing (#20977) 2025-02-26 11:25:49 -08:00
Anis Eleuch
f129fd48f2 Update golang.org/x/crypto to address govulncheck complaint (#20983) 2025-02-26 08:15:09 -08:00
TripleChecker
bc4008ced4 Fix typos (#20970) 2025-02-26 01:25:50 -08:00
dependabot[bot]
526053339b build(deps): bump github.com/go-jose/go-jose/v4 from 4.0.4 to 4.0.5 (#20976)
Bumps [github.com/go-jose/go-jose/v4](https://github.com/go-jose/go-jose) from 4.0.4 to 4.0.5.
- [Release notes](https://github.com/go-jose/go-jose/releases)
- [Changelog](https://github.com/go-jose/go-jose/blob/main/CHANGELOG.md)
- [Commits](https://github.com/go-jose/go-jose/compare/v4.0.4...v4.0.5)

---
updated-dependencies:
- dependency-name: github.com/go-jose/go-jose/v4
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-26 01:25:19 -08:00
Taran Pelkey
62a35b3e77 Update SRSvcAccCreate with new type (#20974) 2025-02-24 17:43:59 -08:00
Taran Pelkey
39df134204 Fix importIAM issue with importing implied policies (#20956) 2025-02-19 10:10:53 -08:00
Minio Trusted
ad4cbce22d Update yaml files to latest version RELEASE.2025-02-18T16-25-55Z 2025-02-18 20:59:14 +00:00
Klaus Post
90f5e1e5f6 tests: Do not allow forced type asserts (#20905) 2025-02-18 08:25:55 -08:00
Klaus Post
aeabac9181 Test checksum types for invalid combinations (#20953) 2025-02-18 08:24:01 -08:00
Klaus Post
b312f13473 Extract all files from encrypted stream with inspect (#20937)
Allow multiple private keys and extract all files from streams.

Place files in the folder with `.enc` removed.

Do basic checks so streams cannot traverse outside of the folder.
2025-02-17 09:09:42 -08:00
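
A sketch of the two guards named above: strip the `.enc` suffix for the output name and refuse entries that would traverse outside the destination folder. filepath.Join plus a prefix check is the standard pattern; this is illustrative, not the inspect tool's exact code.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeOutputPath maps a stream entry to a path inside destDir, dropping a
// trailing ".enc" and rejecting names that would escape the folder.
func safeOutputPath(destDir, name string) (string, error) {
	name = strings.TrimSuffix(name, ".enc")
	p := filepath.Join(destDir, name) // Join cleans the result
	dest := filepath.Clean(destDir)
	if p != dest && !strings.HasPrefix(p, dest+string(filepath.Separator)) {
		return "", fmt.Errorf("entry %q escapes destination folder", name)
	}
	return p, nil
}

func main() {
	fmt.Println(safeOutputPath("out", "backend/xl.meta.enc")) // out/backend/xl.meta
	fmt.Println(safeOutputPath("out", "../../etc/passwd"))    // error
}
```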
Rodrigo dos Santos Felix
727a803bc0 fix(docs): update mc admin trace link to MinIO official docs (#20943) 2025-02-16 20:52:27 -08:00
Name
d0e443172d chore: remove unused and incorrect IsEmpty method from TargetIDSet (#20939) 2025-02-16 08:43:15 -08:00
Jeeva Kandasamy
60446e7ac0 ftp: Enable trailing headers, just like sftp (#20938) 2025-02-15 02:32:09 -08:00
Harshavardhana
b8544266e5 fix: typo in queuestore.go 2025-02-15 02:31:50 -08:00
Ramon de Klein
437dd4e32a Fix missing authorization check for PutObjectRetentionHandler (#20929) 2025-02-12 08:08:13 -08:00
Cesar N.
447054b841 Update console to 1.7.6 (#20925) 2025-02-11 15:43:04 -08:00
Harshavardhana
9bf43e54cd allow ARCH specific hotfixes 2025-02-11 14:33:31 -08:00
Manuel Reis
60f8423157 Quick patch for Snowball AutoExtract: #20883 (#20885)
* Checking allowance on empty prefix or Snowball-prefix - fixes #20883
* Check the policy for each object during Snowball auto-extraction
2025-02-10 15:52:59 -08:00
Klaus Post
4355ea3c3f (s)ftp: Enable trailing headers for upload (#20914)
Since we always "connect" to minio, it is fine.
2025-02-10 08:35:49 -08:00
Klaus Post
e30f1ad7bd Fix nil pointer deref in PeerPolicyMappingHandler (#20913)
The following lines would attempt to dereference the nil value. Instead, just return the error at once.
2025-02-10 08:35:13 -08:00
Minio Trusted
f00c8c4cce Update yaml files to latest version RELEASE.2025-02-07T23-21-09Z 2025-02-08 21:03:40 +00:00
Andreas Auernhammer
703f51164d kms: add MINIO_KMS_REPLICATE_KEYID option (#20909)
This commit adds the `MINIO_KMS_REPLICATE_KEYID` env. variable.
By default - if not specified or not set to `off` - MinIO will
replicate the KMS key ID of an object.

If `MINIO_KMS_REPLICATE_KEYID=off`, MinIO does not include the
object's KMS Key ID when replicating an object. However, it always
sets the SSE-KMS encryption header. This ensures that the object
gets encrypted using SSE-KMS. The target site chooses the KMS key
ID that gets used based on the site and bucket config.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2025-02-07 15:21:09 -08:00
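
A sketch of the documented behavior: the option defaults to on, and when set to `off` the replicated request keeps the SSE-KMS header but omits the specific key ID, so the target site chooses its own. The env variable name is from the commit; the helper names are illustrative.

```go
package main

import (
	"fmt"
	"os"
)

// replicateKeyID reports whether the object's KMS key ID should be copied
// to the target site. Default is on unless MINIO_KMS_REPLICATE_KEYID=off.
func replicateKeyID() bool {
	return os.Getenv("MINIO_KMS_REPLICATE_KEYID") != "off"
}

func replicationHeaders(objectKeyID string) map[string]string {
	h := map[string]string{
		// Always request SSE-KMS so the object stays encrypted at the target.
		"X-Amz-Server-Side-Encryption": "aws:kms",
	}
	if replicateKeyID() && objectKeyID != "" {
		h["X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"] = objectKeyID
	}
	// Otherwise the target site picks the key from its site and bucket config.
	return h
}

func main() {
	os.Setenv("MINIO_KMS_REPLICATE_KEYID", "off")
	fmt.Println(replicationHeaders("arn:minio:kms:key/abc"))
}
```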
Klaus Post
b8dde47d4e fix: multipart replication with single part objects (#20895)
x-amz-checksum-algorithm was not set, causing all single-part multipart
objects uploaded via SFTP/FTP to fail to replicate.
2025-02-05 15:06:02 -08:00
Andreas Auernhammer
7fa3e39f85 sts: allow client-provided intermediate CAs (#20896)
This commit allows clients to provide a set of intermediate CA
certificates (up to `MaxIntermediateCAs`) that the server will
use as intermediate CAs when verifying the trust chain from the
client leaf certificate up to one trusted root CA.

This is required if the client leaf certificate is not issued by
a trusted CA directly but by an intermediate CA. Without this commit,
MinIO rejects such certificates.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2025-02-04 16:29:41 -08:00
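
Verifying a leaf through client-supplied intermediates is standard crypto/x509; a sketch of the pattern, with a cap standing in for the MaxIntermediateCAs limit the commit mentions (the value here is an assumption):

```go
package main

import (
	"crypto/x509"
	"errors"
)

const maxIntermediateCAs = 10 // assumed cap, mirroring MaxIntermediateCAs

// verifyClientChain checks a leaf certificate against trusted roots, letting
// the client supply the intermediate CAs its chain needs.
func verifyClientChain(leaf *x509.Certificate, intermediates []*x509.Certificate, roots *x509.CertPool) error {
	if len(intermediates) > maxIntermediateCAs {
		return errors.New("too many intermediate CA certificates")
	}
	pool := x509.NewCertPool()
	for _, ca := range intermediates {
		pool.AddCert(ca)
	}
	_, err := leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: pool,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	return err
}

func main() {} // sketch: call verifyClientChain from the STS handler's TLS path
```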
Poorna
4df7a3aa8f fix: site replication of bucket deletion sync (#352)
The bucket deletion timestamp was not being passed back
in GetBucketInfo, which is needed to decide on bucket
creation/deletion.
2025-02-04 00:36:03 -08:00
Poorna
64a8f2e554 replication: default tag timestamps in CopyObject call (#20891)
If an object is uploaded with tags, the internal tagging timestamp tracked
for replication will be missing. Default to ModTime in such cases to
allow tags to be synced correctly.

Also fixes a regression in fetching tags and tag comparison.
2025-02-04 00:35:55 -08:00
Minio Trusted
f4fd4ea66d Update yaml files to latest version RELEASE.2025-02-03T21-03-04Z 2025-02-04 06:55:11 +00:00
Anis Eleuch
712fe1a8df fix: proxy requests to honor global transport
* fix: proxy requests to honor global transport
Load the globalProxyEndpoint properly.

Also, currently, proxy requests for batch cancel fail silently
even if the proxy fails; instead, properly send the corresponding
error back for such proxy failures if opted.

* pass the transport to the GetProxyEndpoints function

---------

Co-authored-by: Praveen raj Mani <praveen@minio.io>
2025-02-03 22:03:04 +01:00
Klaus Post
4a319bedc9 Redact sensitive fields from DescribeBatchJob (#20881)
Redacts the following if set:

* replicate/credentials/secretKey
* replicate/credentials/sessionToken
* expire/notify/token
2025-02-03 08:56:26 -08:00
Klaus Post
bdb3db6dad Add lock overload protection (#20876)
Reject new lock requests immediately when 1000 goroutines are queued 
for the local lock mutex.

We do not reject unlocking, refreshing, or maintenance; they add to the count.

The limit is set to allow for bursty behavior but prevent requests from 
overloading the server completely.
2025-01-31 11:54:34 -08:00
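
A hedged sketch of the guard: count queued waiters with an atomic and reject new lock requests past a threshold, while unlock-style operations are never rejected. The 1000 limit is from the commit; the locker itself is illustrative, not MinIO's real one.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

const maxLockWaiters = 1000 // reject new lock requests beyond this queue depth

var errLockOverloaded = errors.New("lock queue overloaded")

type localLocker struct {
	mu      sync.Mutex
	waiters atomic.Int64
}

// Lock rejects immediately if too many goroutines are already queued, so
// bursty callers cannot pile up unboundedly on the local mutex.
func (l *localLocker) Lock() error {
	if l.waiters.Add(1) > maxLockWaiters {
		l.waiters.Add(-1)
		return errLockOverloaded
	}
	defer l.waiters.Add(-1)
	l.mu.Lock()
	return nil
}

// Unlock (like refresh and maintenance in the commit) is never rejected.
func (l *localLocker) Unlock() { l.mu.Unlock() }

func main() {
	var l localLocker
	if err := l.Lock(); err == nil {
		fmt.Println("locked")
		l.Unlock()
	}
}
```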
Klaus Post
abb385af41 Check for valid checksum (#20878)
Add a few safety measures for checksums.
2025-01-28 16:59:23 -08:00
Harshavardhana
4ee62606e4 update govulncheck 2025-01-28 11:11:08 -08:00
Anis Eleuch
079d64c801 DeleteObjects: Send delete to all pools (#172) (#20821)
Currently, DeleteObjects() tries to find the object's pool before
sending a delete request. This does not work well when an object has
multiple versions in different pools, since the pool lookup does
not consider the version-id. When an S3 client wants to
remove a version-id that exists in pool 2, the delete request can be
directed to pool 1 because it holds another version of the same object.

This commit removes the pool-lookup logic and sends a delete
request to all pools in parallel. This should not cause any performance
regression in most cases, since an object is unlikely to exist
in more than one pool, and the performance price will be similar to
getPoolIndex() in that case.
2025-01-28 08:57:18 -08:00
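
A sketch of the fan-out: send the versioned delete to every pool in parallel and succeed if any pool held the version. The pool type and error handling are illustrative.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errVersionNotFound = errors.New("version not found")

// pool is an illustrative stand-in for an erasure-coded server pool.
type pool interface {
	DeleteVersion(object, versionID string) error
}

// deleteFromAllPools fans the delete out to every pool in parallel instead of
// first guessing the pool (a guess that cannot account for the version-id).
func deleteFromAllPools(pools []pool, object, versionID string) error {
	errs := make([]error, len(pools))
	var wg sync.WaitGroup
	for i, p := range pools {
		wg.Add(1)
		go func(i int, p pool) {
			defer wg.Done()
			errs[i] = p.DeleteVersion(object, versionID)
		}(i, p)
	}
	wg.Wait()
	for _, err := range errs {
		if err == nil {
			return nil // at least one pool held and deleted the version
		}
	}
	return fmt.Errorf("%s (%s): %w", object, versionID, errVersionNotFound)
}

func main() {} // sketch only; real pools report errVersionNotFound when they lack the version
```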
Klaus Post
dcc000ae2c Allow URLs up to 32KB and improve parsing speed (#20874)
Before/after...
```
Benchmark_hasBadPathComponent/long-32          	   43936	     27232 ns/op	 146.89 MB/s	   32768 B/op	       1 allocs/op
Benchmark_hasBadPathComponent/long-32          	   89956	     13375 ns/op	 299.07 MB/s	       0 B/op	       0 allocs/op
```

* Remove unused.
2025-01-27 08:42:45 -08:00
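
The before/after allocations above suggest the win came from scanning path segments in place rather than splitting into a new slice. A hedged sketch of an allocation-free check; the real hasBadPathComponent may differ in what it rejects:

```go
package main

import "fmt"

// hasBadPathComponent reports whether any '/'-separated component of p is
// ".", "..", or padded with spaces. It walks the string in place via slicing,
// so it performs no allocations regardless of input length.
func hasBadPathComponent(p string) bool {
	for len(p) > 0 {
		i := 0
		for i < len(p) && p[i] != '/' {
			i++
		}
		switch seg := p[:i]; seg {
		case ".", "..":
			return true
		default:
			if len(seg) > 0 && (seg[0] == ' ' || seg[len(seg)-1] == ' ') {
				return true
			}
		}
		if i == len(p) {
			break
		}
		p = p[i+1:]
	}
	return false
}

func main() {
	fmt.Println(hasBadPathComponent("bucket/ok/object")) // false
	fmt.Println(hasBadPathComponent("bucket/../object")) // true
}
```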
Harshavardhana
c5d19ecebb do not expose secret-key to lambda event handler (#20870) 2025-01-24 11:27:43 -08:00
Harshavardhana
ed29a525b3 remove fips builds 2025-01-21 02:10:10 -08:00
Minio Trusted
020c46cd3c Update yaml files to latest version RELEASE.2025-01-20T14-49-07Z 2025-01-21 09:44:32 +00:00
333 changed files with 6227 additions and 3711 deletions

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
       - uses: actions/checkout@v4

@@ -61,7 +61,7 @@ jobs:
       # are turned off - i.e. if ldap="", then ldap server is not enabled for
       # the tests.
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         ldap: ["", "localhost:389"]
         etcd: ["", "http://localhost:2379"]
         openid: ["", "http://127.0.0.1:5556/dex"]

@@ -29,7 +29,7 @@ jobs:
       - name: setup-go-step
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.x
+          go-version: 1.24.x
       - name: github sha short
         id: vars

@@ -21,7 +21,7 @@ jobs:
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
     steps:
       - uses: actions/checkout@v4

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:

@@ -20,7 +20,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.23.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:

@@ -21,7 +21,8 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: 1.23.2
+          go-version: 1.24.x
+          cached: false
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash

@@ -1,36 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-  misspell:
-    locale: US
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - durationcheck
     - forcetypeassert
     - gocritic
-    - gofumpt
-    - goimports
     - gomodguard
     - govet
     - ineffassign
     - misspell
     - revive
     - staticcheck
-    - tenv
-    - typecheck
     - unconvert
     - unused
+    - usetesting
     - whitespace
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 issues:
-  exclude-use-default: false
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
   max-issues-per-linter: 100
   max-same-issues: 100
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$

@@ -1,11 +1,5 @@
 [files]
-extend-exclude = [
-    ".git/",
-    "docs/",
-    "CREDITS",
-    "go.mod",
-    "go.sum",
-]
+extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
 ignore-hidden = false

 [default]
@@ -20,6 +14,7 @@ extend-ignore-re = [
     'http\.Header\{"X-Amz-Server-Side-Encryptio":',
     "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
     "ERRO:",
+    "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line
 ]

 [default.extend-words]
@@ -40,3 +35,11 @@ extend-ignore-re = [
 "TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
 "thr" = "thr"
 "toi" = "toi"
+
+[type.go]
+extend-ignore-identifiers-re = [
+    # Variants of `typ` used to mean `type` in golang as it is otherwise a
+    # keyword - some of these (like typ1 -> type1) can be fixed, but probably
+    # not worth the effort.
+    "[tT]yp[0-9]*",
+]

CREDITS

@@ -4095,214 +4095,6 @@ SOFTWARE.
 ================================================================
-github.com/census-instrumentation/opencensus-proto
-https://github.com/census-instrumentation/opencensus-proto
-----------------------------------------------------------------
-[full Apache License, Version 2.0 text, removed with this entry]
 github.com/cespare/xxhash/v2
 https://github.com/cespare/xxhash/v2
 ----------------------------------------------------------------
@@ -6754,6 +6546,420 @@ https://github.com/envoyproxy/go-control-plane
 ================================================================
+github.com/envoyproxy/go-control-plane/envoy
+https://github.com/envoyproxy/go-control-plane/envoy
+----------------------------------------------------------------
+[full Apache License, Version 2.0 text]
+================================================================
+github.com/envoyproxy/go-control-plane/ratelimit
+https://github.com/envoyproxy/go-control-plane/ratelimit
+----------------------------------------------------------------
+[full Apache License, Version 2.0 text]
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
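To make the appendix above concrete, here is a sketch of that boilerplate applied as a Go file header. The year, owner, and package name are placeholder values for illustration only, not taken from any file in this change; note the package comment supplying the recommended "description of purpose" on the same page as the notice:
```go
// Copyright 2025 Jane Doe
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package placeholder exists only to show where the notice sits
// relative to the package clause.
package placeholder
```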
================================================================
github.com/envoyproxy/protoc-gen-validate
https://github.com/envoyproxy/protoc-gen-validate
----------------------------------------------------------------
@@ -7760,7 +7966,7 @@ https://github.com/go-ldap/ldap/v3
The MIT License (MIT)
Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-ldap Authors
Portions copyright (c) 2015-2024 go-ldap Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -12447,6 +12653,39 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
github.com/gorilla/mux
https://github.com/gorilla/mux
----------------------------------------------------------------
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
github.com/gorilla/websocket
https://github.com/gorilla/websocket
----------------------------------------------------------------
@@ -18356,6 +18595,214 @@ For more information on this, and how to apply and follow the GNU AGPL, see
================================================================
github.com/minio/crc64nvme
https://github.com/minio/crc64nvme
----------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================================
github.com/minio/dnscache
https://github.com/minio/dnscache
----------------------------------------------------------------
@@ -22346,7 +22793,7 @@ https://github.com/minio/minio-go/v7
github.com/minio/mux
https://github.com/minio/mux
----------------------------------------------------------------
Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -24404,33 +24851,6 @@ https://github.com/modern-go/reflect2
================================================================
github.com/montanaflynn/stats
https://github.com/montanaflynn/stats
----------------------------------------------------------------
The MIT License (MIT)
Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================================
github.com/muesli/ansi
https://github.com/muesli/ansi
----------------------------------------------------------------
@@ -28451,7 +28871,7 @@ https://github.com/safchain/ethtool
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright (c) 2015 The Ethtool Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
FROM golang:1.23-alpine as build
FROM golang:1.24-alpine as build
ARG TARGETARCH
ARG RELEASE


@@ -1,4 +1,4 @@
FROM golang:1.23-alpine AS build
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE


@@ -1,69 +0,0 @@
FROM golang:1.23-alpine AS build
ARG TARGETARCH
ARG RELEASE
ENV GOPATH=/go
ENV CGO_ENABLED=0
# Install curl and minisign
RUN apk add -U --no-cache ca-certificates && \
apk add -U --no-cache curl && \
go install aead.dev/minisign/cmd/minisign@v0.2.1
# Download minio binary and signature files
RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.sha256sum -o /go/bin/minio.sha256sum && \
chmod +x /go/bin/minio
# Download mc binary and signature files
RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips -o /go/bin/mc && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips.minisig -o /go/bin/mc.minisig && \
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.fips.sha256sum -o /go/bin/mc.sha256sum && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
FROM registry.access.redhat.com/ubi9/ubi-micro:latest
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key \
MINIO_ROOT_USER_FILE=access_key \
MINIO_ROOT_PASSWORD_FILE=secret_key \
MINIO_KMS_SECRET_KEY_FILE=kms_master_key \
MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \
MINIO_CONFIG_ENV_FILE=config.env
RUN chmod -R 777 /usr/bin
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio* /usr/bin/
COPY --from=build /go/bin/mc* /usr/bin/
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
EXPOSE 9000
VOLUME ["/data"]
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
CMD ["minio"]


@@ -1,4 +1,4 @@
FROM golang:1.23-alpine AS build
FROM golang:1.24-alpine AS build
ARG TARGETARCH
ARG RELEASE


@@ -2,8 +2,8 @@ PWD := $(shell pwd)
GOPATH := $(shell go env GOPATH)
LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)
GOARCH := $(shell go env GOARCH)
GOOS := $(shell go env GOOS)
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
VERSION ?= $(shell git describe --tags)
REPO ?= quay.io/minio
@@ -24,8 +24,6 @@ help: ## print this help
getdeps: ## fetch necessary dependencies
@mkdir -p ${GOPATH}/bin
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@v1.2.5
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
crosscompile: ## cross compile minio
@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -180,7 +178,7 @@ build-debugging:
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
@@ -188,9 +186,9 @@ hotfix-vars:
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.1/pkger_2.3.1_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.3.1_linux_amd64.deb --yes
@wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@@ -200,11 +198,11 @@ hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@pkger -r $(VERSION) --ignore
hotfix-push: hotfix
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)"
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/
@scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive
@echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)"
docker-hotfix-push: docker-hotfix
@docker push -q $(TAG) && echo "Published new container $(TAG)"


@@ -122,7 +122,7 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)
```sh
go install github.com/minio/minio@latest
```

@@ -69,8 +69,10 @@ __init__() {
## this is needed because github actions don't have
## docker-compose on all runners
go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
COMPOSE_VERSION=v2.35.1
mkdir -p /tmp/gopath/bin/
wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64
chmod +x /tmp/gopath/bin/docker-compose
cleanup


@@ -38,6 +38,7 @@ import (
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/mux"
"github.com/minio/pkg/v3/policy"
@@ -980,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, "", err)
continue
}
}
rptData, err := json.Marshal(rpt.BucketMetaImportErrs)
@@ -1039,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
}
if len(diffCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(diffCh) > 0 {
@@ -1048,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -1098,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
}
if len(mrfCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(mrfCh) > 0 {
@@ -1107,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}


@@ -125,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate
@@ -416,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht
return
}
if err = validateConfig(ctx, cfg, subSys); err != nil {
var validationErr ldap.Validation
if errors.As(err, &validationErr) {
// If we got an LDAP validation error, we need to send appropriate


@@ -214,10 +214,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
}
// Check if we are creating svc account for request sender.
isSvcAccForRequestor := false
if targetUser == requestorUser || targetUser == requestorParentUser {
isSvcAccForRequestor = true
}
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
var (
targetGroups []string
@@ -345,7 +342,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
Name: newCred.Name,
Description: newCred.Description,
Claims: opts.claims,
SessionPolicy: createReq.Policy,
SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
Status: auth.AccountOn,
Expiration: createReq.Expiration,
},


@@ -0,0 +1,246 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"encoding/json"
"errors"
"net/http"
"sort"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/pkg/v3/policy"
)
const dummyRoleARN = "dummy-internal"
// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !globalIAMSys.OpenIDConfig.Enabled {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL)
return
}
userList := r.Form["users"]
isAll := r.Form.Get("all") == "true"
selfOnly := !isAll && len(userList) == 0
cfgName := r.Form.Get("configName")
allConfigs := r.Form.Get("allConfigs") == "true"
if cfgName == "" && !allConfigs {
cfgName = madmin.Default
}
if isAll && len(userList) > 0 {
// This should be checked on client side, so return generic error
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Empty DN list and not self, list access keys for all users
if isAll {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListUsersAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else if len(userList) == 1 && userList[0] == cred.ParentUser {
selfOnly = true
}
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: selfOnly,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if selfOnly && len(userList) == 0 {
selfDN := cred.AccessKey
if cred.ParentUser != "" {
selfDN = cred.ParentUser
}
userList = append(userList, selfDN)
}
listType := r.Form.Get("listType")
var listSTSKeys, listServiceAccounts bool
switch listType {
case madmin.AccessKeyListUsersOnly:
listSTSKeys = false
listServiceAccounts = false
case madmin.AccessKeyListSTSOnly:
listSTSKeys = true
listServiceAccounts = false
case madmin.AccessKeyListSvcaccOnly:
listSTSKeys = false
listServiceAccounts = true
case madmin.AccessKeyListAll:
listSTSKeys = true
listServiceAccounts = true
default:
err := errors.New("invalid list type")
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}
s := globalServerConfig.Clone()
roleArnMap := make(map[string]string)
// Map of configs to a map of users to their access keys
cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys)
configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, config := range configs {
if !allConfigs && cfgName != config.Name {
continue
}
arn := dummyRoleARN
if config.RoleARN != "" {
arn = config.RoleARN
}
roleArnMap[arn] = config.Name
newResp := make(map[string]madmin.OpenIDUserAccessKeys)
cfgToUsersMap[config.Name] = newResp
}
if len(roleArnMap) == 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL)
return
}
userSet := set.CreateStringSet(userList...)
accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, accessKey := range accessKeys {
// Filter out any disqualifying access keys
_, ok := accessKey.Claims[subClaim]
if !ok {
continue // OpenID access keys must have a sub claim
}
if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) {
continue // skip if not the type we want
}
arn, ok := accessKey.Claims[roleArnClaim].(string)
if !ok {
if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
continue // skip if no roleArn and no policy claim
}
}
matchingCfgName, ok := roleArnMap[arn]
if !ok {
continue // skip if not part of the target config
}
var id string
if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" {
id, _ = accessKey.Claims[idClaim].(string)
}
if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) {
continue // skip if not in the user list
}
openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser]
// Add new user to map if not already present
if !ok {
var readableClaim string
if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" {
readableClaim, _ = accessKey.Claims[rc].(string)
}
openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{
MinioAccessKey: accessKey.ParentUser,
ID: id,
ReadableName: readableClaim,
}
}
svcAccInfo := madmin.ServiceAccountInfo{
AccessKey: accessKey.AccessKey,
Expiration: &accessKey.Expiration,
}
if accessKey.IsServiceAccount() {
openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo)
} else {
openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo)
}
cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys
}
// Convert map to slice and sort
resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap))
for cfgName, usersMap := range cfgToUsersMap {
users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap))
for _, user := range usersMap {
users = append(users, user)
}
sort.Slice(users, func(i, j int) bool {
return users[i].MinioAccessKey < users[j].MinioAccessKey
})
resp = append(resp, madmin.ListAccessKeysOpenIDResp{
ConfigName: cfgName,
Users: users,
})
}
sort.Slice(resp, func(i, j int) bool {
return resp[i].ConfigName < resp[j].ConfigName
})
data, err := json.Marshal(resp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}


@@ -258,8 +258,8 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
// concurrent rebalance-start commands.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}
}
@@ -329,8 +329,8 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
// pools may temporarily have out of date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}
}
@@ -383,8 +383,8 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
if proxyEp.Host == host && !proxyEp.IsLocal {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return true
}
}


@@ -197,12 +197,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
return
}
checkDenyOnly := false
if name == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to view one's own info.
checkDenyOnly = true
}
checkDenyOnly := name == cred.AccessKey
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
@@ -493,12 +488,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to change one's own password.
checkDenyOnly = true
}
checkDenyOnly := accessKey == cred.AccessKey
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
@@ -689,10 +679,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
}
// Check if we are creating svc account for request sender.
isSvcAccForRequestor := false
if targetUser == requestorUser || targetUser == requestorParentUser {
isSvcAccForRequestor = true
}
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
// If we are creating svc account for request sender, ensure
// that targetUser is a real user (i.e. not derived
@@ -783,7 +770,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
Name: newCred.Name,
Description: newCred.Description,
Claims: opts.claims,
SessionPolicy: createReq.Policy,
SessionPolicy: madmin.SRSessionPolicy(createReq.Policy),
Status: auth.AccountOn,
Expiration: createReq.Expiration,
},
@@ -907,7 +894,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
Status: opts.status,
Name: opts.name,
Description: opts.description,
SessionPolicy: updateReq.NewPolicy,
SessionPolicy: madmin.SRSessionPolicy(updateReq.NewPolicy),
Expiration: updateReq.NewExpiration,
},
},
@@ -1487,8 +1474,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return
}
effectivePolicy = globalIAMSys.GetCombinedPolicy(policies...)
}
buf, err = json.MarshalIndent(effectivePolicy, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -2003,6 +1990,227 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
writeSuccessResponseJSON(w, encryptedData)
}
// RevokeTokens - POST /minio/admin/v3/revoke-tokens/{userProvider}
func (a adminAPIHandlers) RevokeTokens(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
userProvider := mux.Vars(r)["userProvider"]
user := r.Form.Get("user")
tokenRevokeType := r.Form.Get("tokenRevokeType")
fullRevoke := r.Form.Get("fullRevoke") == "true"
isTokenSelfRevoke := user == ""
if !isTokenSelfRevoke {
var err error
user, err = getUserWithProvider(ctx, userProvider, user, false)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
if (user != "" && tokenRevokeType == "" && !fullRevoke) || (tokenRevokeType != "" && fullRevoke) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
adminPrivilege := globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.RemoveServiceAccountAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
})
if !adminPrivilege || isTokenSelfRevoke {
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if !isTokenSelfRevoke && user != parentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
user = parentUser
}
// Infer token revoke type from the request if requestor is STS.
if isTokenSelfRevoke && tokenRevokeType == "" && !fullRevoke {
if cred.IsTemp() {
tokenRevokeType, _ = cred.Claims[tokenRevokeTypeClaim].(string)
}
if tokenRevokeType == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNoTokenRevokeType), r.URL)
return
}
}
err := globalIAMSys.RevokeTokens(ctx, user, tokenRevokeType)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// InfoAccessKey - GET /minio/admin/v3/info-access-key?accessKey=<access-key>
func (a adminAPIHandlers) InfoAccessKey(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
accessKey := mux.Vars(r)["accessKey"]
if accessKey == "" {
accessKey = cred.AccessKey
}
u, ok := globalIAMSys.GetUser(ctx, accessKey)
targetCred := u.Credentials
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
// If requested user does not exist and requestor is not allowed to list service accounts, return access denied.
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
requestUser := cred.AccessKey
if cred.ParentUser != "" {
requestUser = cred.ParentUser
}
if requestUser != targetCred.ParentUser {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
}
if !ok {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchAccessKey), r.URL)
return
}
var (
sessionPolicy *policy.Policy
err error
userType string
)
switch {
case targetCred.IsTemp():
userType = "STS"
_, sessionPolicy, err = globalIAMSys.GetTemporaryAccount(ctx, accessKey)
if err == errNoSuchTempAccount {
err = errNoSuchAccessKey
}
case targetCred.IsServiceAccount():
userType = "Service Account"
_, sessionPolicy, err = globalIAMSys.GetServiceAccount(ctx, accessKey)
if err == errNoSuchServiceAccount {
err = errNoSuchAccessKey
}
default:
err = errNoSuchAccessKey
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// if session policy is nil or empty, then it is implied policy
impliedPolicy := sessionPolicy == nil || (sessionPolicy.Version == "" && len(sessionPolicy.Statements) == 0)
var svcAccountPolicy policy.Policy
if !impliedPolicy {
svcAccountPolicy = *sessionPolicy
} else {
policiesNames, err := globalIAMSys.PolicyDBGet(targetCred.ParentUser, targetCred.Groups...)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...)
}
policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ")
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var expiration *time.Time
if !targetCred.Expiration.IsZero() && !targetCred.Expiration.Equal(timeSentinel) {
expiration = &targetCred.Expiration
}
userProvider := guessUserProvider(targetCred)
infoResp := madmin.InfoAccessKeyResp{
AccessKey: accessKey,
InfoServiceAccountResp: madmin.InfoServiceAccountResp{
ParentUser: targetCred.ParentUser,
Name: targetCred.Name,
Description: targetCred.Description,
AccountStatus: targetCred.Status,
ImpliedPolicy: impliedPolicy,
Policy: string(policyJSON),
Expiration: expiration,
},
UserType: userType,
UserProvider: userProvider,
}
populateProviderInfoFromClaims(targetCred.Claims, userProvider, &infoResp)
data, err := json.Marshal(infoResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
const (
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
@@ -2169,7 +2377,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
SecretKey: acc.Credentials.SecretKey,
Groups: acc.Credentials.Groups,
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
SessionPolicy: policyJSON,
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
@@ -2279,7 +2487,6 @@ func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiV
// import policies first
{
f, err := zr.Open(pathJoin(iamAssetsDir, allPoliciesFile))
switch {
case errors.Is(err, os.ErrNotExist):
@@ -2362,7 +2569,6 @@ func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiV
} else {
added.Users = append(added.Users, accessKey)
}
}
}
}
@@ -2675,7 +2881,7 @@ func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) e
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
dur := exp.Sub(time.Now())
dur := time.Until(*exp)
if dur <= 0 {
return errors.New("unsupported expiration time")
}
@@ -2753,7 +2959,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
denyOnly := (targetUser == cred.AccessKey || targetUser == cred.ParentUser)
if ldap && !denyOnly {
res, _ := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if res.NormDN == cred.ParentUser {
if res != nil && res.NormDN == cred.ParentUser {
denyOnly = true
}
}


@@ -160,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}
func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)
s.RestartTestServer(c)
s.iamSetup(c)
}


@@ -49,6 +49,7 @@ import (
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
@@ -59,7 +60,6 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
@@ -829,7 +829,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request)
}
// Flush before waiting for next...
w.(http.Flusher).Flush()
xhttp.Flush(w)
select {
case <-ticker.C:
@@ -1320,7 +1320,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
}
// Analyze the heal token and route the request accordingly
token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
token, _, success := proxyRequestByToken(ctx, w, r, hip.clientToken, false)
if success {
return
}
@@ -1359,7 +1359,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case hr := <-respCh:
switch hr.apiErr {
case noError:
@@ -1367,7 +1367,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(hr.respBytes); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
} else {
writeSuccessResponseJSON(w, hr.respBytes)
}
@@ -1394,7 +1394,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write(errorRespJSON); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
break forLoop
}
@@ -1407,7 +1407,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
clientToken := nh.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", nh.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
@@ -1611,7 +1611,6 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte {
break
}
}
w.WriteHeader(http.StatusOK)
}
@@ -1840,7 +1839,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1849,7 +1848,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
return
}
prevResult = result
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -1958,7 +1957,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case result, ok := <-ch:
if !ok {
return
@@ -1966,7 +1965,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
if err := enc.Encode(result); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
}
}
@@ -2083,7 +2082,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
grid.PutByteBuffer(entry)
if len(traceCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(traceCh) > 0 {
@@ -2092,7 +2091,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2184,7 +2183,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
grid.PutByteBuffer(log)
if len(logCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
@@ -2193,7 +2192,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-ctx.Done():
return
}
@@ -2677,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
// disk metrics are already included under drive info of each server
getRealtimeMetrics := func() *madmin.RealtimeMetrics {
var m madmin.RealtimeMetrics
var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk
types := madmin.MetricsAll &^ madmin.MetricsDisk
mLocal := collectLocalMetrics(types, collectMetricsOpts{})
m.Merge(&mLocal)
cctx, cancel := context.WithTimeout(healthCtx, time.Second/2)
@@ -2721,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
var anonPools []string
if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") {
// No ellipses pattern. Anonymize host name from every pool arg
pools := strings.Fields(poolsArgs)
anonPools = make([]string, len(pools))
@@ -2963,13 +2962,13 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
if len(healthInfoCh) == 0 {
// Flush if nothing is queued
w.(http.Flusher).Flush()
xhttp.Flush(w)
}
case <-ticker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
xhttp.Flush(w)
case <-healthCtx.Done():
return
}
@@ -3421,7 +3420,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
if volume != minioMetaBucket || file != formatConfigFile {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {


@@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string,
}
func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
adminTestBed, err := prepareAdminErasureTestBed(ctx)


@@ -260,7 +260,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
} else {
clientToken := he.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
hsp = madmin.HealStopSuccess{
@@ -331,7 +331,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
if h.clientToken == bgHealingUUID {


@@ -246,6 +246,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// Access key (service account/STS) operations
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}")
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
@@ -312,6 +313,11 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP))
// OpenID specific service accounts ops
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}")
// -- END IAM APIs --
// GetBucketQuotaConfig
@@ -424,6 +430,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// -- Health API --
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler))
// STS Revocation
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens))
}
// If none of the routes match add default error handler routes

View File
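The new OpenID bulk-listing and token-revocation routes follow the file's established registration pattern: match on HTTP method, versioned path, and, where required, a mandatory query parameter. A self-contained sketch of that pattern using gorilla/mux (the handler and exact path are placeholders):

package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func registerRoutes(r *mux.Router, h http.HandlerFunc) {
	// Matches only GET requests that carry a listType query value,
	// e.g. .../idp/openid/list-access-keys-bulk?listType=all
	r.Methods(http.MethodGet).
		Path("/minio/admin/v3/idp/openid/list-access-keys-bulk").
		HandlerFunc(h).
		Queries("listType", "{listType:.*}")
}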

@@ -32,6 +32,8 @@ type DeletedObject struct {
DeleteMarkerMTime DeleteMarkerMTime `xml:"-"`
// MinIO extensions to support delete marker replication
ReplicationState ReplicationState `xml:"-"`
found bool // the object was found during deletion
}
// DeleteMarkerMTime is an embedded type containing time.Time for XML marshal
@@ -42,10 +44,10 @@ type DeleteMarkerMTime struct {
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
if t.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
return e.EncodeElement(t.Format(time.RFC3339), startElement)
}
// ObjectV object version key/versionId

View File
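Dropping the explicit .Time selector in DeleteMarkerMTime.MarshalXML works because the struct embeds time.Time, and Go promotes an embedded type's methods to the outer struct. A small demonstration:

package main

import (
	"fmt"
	"time"
)

// DeleteMarkerMTime embeds time.Time, so IsZero and Format are
// promoted and callable directly on the outer value.
type DeleteMarkerMTime struct {
	time.Time
}

func main() {
	var t DeleteMarkerMTime
	fmt.Println(t.IsZero() == t.Time.IsZero()) // true: same method
}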

@@ -214,6 +214,9 @@ const (
ErrPolicyNotAttached
ErrExcessData
ErrPolicyInvalidName
ErrNoTokenRevokeType
ErrAdminOpenIDNotEnabled
ErrAdminNoSuchAccessKey
// Add new error codes here.
// SSE-S3/SSE-KMS related API errors
@@ -567,6 +570,11 @@ var errorCodes = errorCodeMap{
Description: "Policy name may not contain comma",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminOpenIDNotEnabled: {
Code: "OpenIDNotEnabled",
Description: "No enabled OpenID Connect identity providers",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPolicyTooLarge: {
Code: "PolicyTooLarge",
Description: "Policy exceeds the maximum allowed document size.",
@@ -1264,6 +1272,16 @@ var errorCodes = errorCodeMap{
Description: "The security token included in the request is invalid",
HTTPStatusCode: http.StatusForbidden,
},
ErrNoTokenRevokeType: {
Code: "InvalidArgument",
Description: "No token revoke type specified and one could not be inferred from the request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchAccessKey: {
Code: "XMinioAdminNoSuchAccessKey",
Description: "The specified access key does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
// S3 extensions.
ErrContentSHA256Mismatch: {
@@ -2161,6 +2179,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchUserLDAPWarn
case errNoSuchServiceAccount:
apiErr = ErrAdminServiceAccountNotFound
case errNoSuchAccessKey:
apiErr = ErrAdminNoSuchAccessKey
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
@@ -2254,6 +2274,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
case hash.ErrInvalidChecksum:
apiErr = ErrInvalidChecksum
}
// Compression errors

View File
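Wiring up an error such as ErrAdminNoSuchAccessKey takes three coordinated pieces, all visible in the hunks above: the APIErrorCode constant, an errorCodes entry carrying the wire code, description, and HTTP status, and a case in toAPIErrorCode translating the internal sentinel error. A compressed sketch of that pattern with simplified types:

package main

import (
	"errors"
	"net/http"
)

type APIErrorCode int

const (
	ErrNone APIErrorCode = iota
	ErrAdminNoSuchAccessKey
)

type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

var errorCodes = map[APIErrorCode]APIError{
	ErrAdminNoSuchAccessKey: {
		Code:           "XMinioAdminNoSuchAccessKey",
		Description:    "The specified access key does not exist.",
		HTTPStatusCode: http.StatusNotFound,
	},
}

var errNoSuchAccessKey = errors.New("access key does not exist")

func toAPIErrorCode(err error) APIErrorCode {
	if errors.Is(err, errNoSuchAccessKey) {
		return ErrAdminNoSuchAccessKey
	}
	return ErrNone
}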

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"errors"
"testing"
@@ -64,7 +63,7 @@ var toAPIErrorTests = []struct {
}
func TestAPIErrCode(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {

View File

@@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) {
e = char
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')
if !isAlnum {
t.Fail()
}
}

View File

@@ -520,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}
case crypto.SSEC:
m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
}
var toRemove []string
@@ -791,7 +790,7 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse {
cs := oi.decryptChecksums(0, h)
cs, _ := oi.decryptChecksums(0, h)
c := CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,

View File

@@ -227,13 +227,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
}
// Skip wrapping with the gzip middleware if specified.
var gzippedHandler http.HandlerFunc = tracedHandler
gzippedHandler := tracedHandler
if !handlerFlags.has(noGZS3HFlag) {
gzippedHandler = gzipHandler(gzippedHandler)
}
// Skip wrapping with throttling middleware if specified.
var throttledHandler http.HandlerFunc = gzippedHandler
throttledHandler := gzippedHandler
if !handlerFlags.has(noThrottleS3HFlag) {
throttledHandler = maxClients(throttledHandler)
}
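The := rewrites in s3APIMiddleware keep the wrapping pattern intact: each optional layer re-assigns the handler only when its skip flag is absent, producing a trace, then gzip, then throttle chain. A generic sketch of that conditional chaining, with placeholder names:

package main

import "net/http"

type middleware func(http.HandlerFunc) http.HandlerFunc

// chain wraps h with each middleware in order, innermost first; a nil
// entry stands in for a skipped layer, mirroring the flag checks above.
func chain(h http.HandlerFunc, mws ...middleware) http.HandlerFunc {
	for _, mw := range mws {
		if mw != nil {
			h = mw(h)
		}
	}
	return h
}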

File diff suppressed because one or more lines are too long

View File

@@ -96,7 +96,7 @@ func isRequestSignStreamingTrailerV4(r *http.Request) bool {
// Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer.
func isRequestUnsignedTrailerV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer &&
r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding)
r.Method == http.MethodPut
}
// Authorization type.
@@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
s3Err := ErrAccessDenied
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
getRequestAuthType(r) == authTypeSigned {
// Get credential information from the request.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
@@ -364,7 +363,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
var cred auth.Credentials
var owner bool
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
return ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
@@ -675,32 +674,6 @@ func setAuthMiddleware(h http.Handler) http.Handler {
})
}
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, s3Err
}
return cred, owner, ErrNone
}
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
@@ -755,8 +728,14 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
case authTypeStreamingUnsignedTrailer:
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err == ErrMissingFields {
// Could be anonymous. cred + owner is zero value.
s3Err = ErrNone
}
}
if s3Err != ErrNone {
return s3Err

View File
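Two auth changes travel together here: isRequestUnsignedTrailerV4 now classifies a PUT from the x-amz-content-sha256 sentinel alone, without also requiring a streaming Content-Encoding, and isPutActionAllowed treats a missing-fields result for unsigned-trailer requests as anonymous access. A standalone sketch of the relaxed classification:

package main

import "net/http"

const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"

// isUnsignedTrailerV4 mirrors the relaxed check: the PUT method plus
// the sentinel x-amz-content-sha256 value is now sufficient.
func isUnsignedTrailerV4(r *http.Request) bool {
	return r.Method == http.MethodPut &&
		r.Header.Get("X-Amz-Content-Sha256") == unsignedPayloadTrailer
}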

@@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) {
}
func TestCheckAdminRequestAuthType(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
@@ -450,7 +450,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
}
func TestValidateAdminSignature(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)

View File
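These tests switch from context.Background() to t.Context(), added in Go 1.24: the returned context is canceled automatically just before the test finishes (benchmarks get the analogous b.Context(), used further down), so wrapping it in context.WithCancel still works but the explicit cancel becomes a formality. For example:

package main

import "testing"

func TestWithAutoCancel(t *testing.T) {
	ctx := t.Context() // canceled automatically at test teardown

	// The context is live for the duration of the test body.
	select {
	case <-ctx.Done():
		t.Fatal("context should still be live inside the test")
	default:
	}
}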

@@ -195,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
return false
}
}
}
if len(ef.Metadata) > 0 && !obj.DeleteMarker {
for _, kv := range ef.Metadata {
// Object (version) must match all x-amz-meta and
@@ -289,6 +289,16 @@ type BatchJobExpire struct {
var _ yaml.Unmarshaler = &BatchJobExpire{}
// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobExpire) RedactSensitive() {
if r == nil {
return
}
if r.NotificationCfg.Token != "" {
r.NotificationCfg.Token = redactedText
}
}
// UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information.
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error {
type expireJob BatchJobExpire
@@ -414,12 +424,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
go func(toExpire []expireObjInfo) {
defer wk.Give()
toExpireAll := make([]ObjectInfo, 0, len(toExpire))
toExpireAll := make([]expireObjInfo, 0, len(toExpire))
toDel := make([]ObjectToDelete, 0, len(toExpire))
oiCache := newObjInfoCache()
for _, exp := range toExpire {
if exp.ExpireAll {
toExpireAll = append(toExpireAll, exp.ObjectInfo)
toExpireAll = append(toExpireAll, exp)
continue
}
// Cache ObjectInfo value via pointers for
@@ -435,14 +445,14 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
oiCache.Add(od, &exp.ObjectInfo)
}
var done bool
// DeleteObject(deletePrefix: true) to expire all versions of an object
for _, exp := range toExpireAll {
var success bool
for attempts := 1; attempts <= retryAttempts; attempts++ {
select {
case <-ctx.Done():
done = true
ri.trackMultipleObjectVersions(exp, success)
return
default:
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
@@ -459,14 +469,7 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
break
}
}
ri.trackMultipleObjectVersions(r.Bucket, exp, success)
if done {
break
}
}
if done {
return
ri.trackMultipleObjectVersions(exp, success)
}
// DeleteMultiple objects
@@ -524,7 +527,8 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
type expireObjInfo struct {
ObjectInfo
ExpireAll bool
ExpireAll bool
DeleteMarkerCount int64
}
// Start the batch expiration job, resumes if there was a pending job via "job.ID"
@@ -621,80 +625,115 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
matchedFilter BatchJobExpireFilter
versionsCount int
toDel []expireObjInfo
failed bool
done bool
)
failed := false
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
deleteMarkerCountMap := map[string]int64{}
pushToExpire := func() {
// set the previous object's deleteMarkerCount
if len(toDel) > 0 {
lastDelIndex := len(toDel) - 1
lastDel := toDel[lastDelIndex]
if lastDel.ExpireAll {
toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name]
// delete the key
delete(deleteMarkerCountMap, lastDel.Name)
}
}
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)
var done bool
select {
case <-ctx.Done():
done = true
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
}
if done {
break
}
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
xfer := make([]expireObjInfo, len(toDel))
copy(xfer, toDel)
select {
case expireCh <- xfer:
toDel = toDel[:0] // resetting toDel
case <-ctx.Done():
done = true
}
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
continue
}
if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
}
for {
select {
case result, ok := <-results:
if !ok {
done = true
break
}
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}
if result.Item.DeleteMarker {
deleteMarkerCountMap[result.Item.Name]++
}
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.Item.IsLatest {
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
}
}
if !found {
continue
}
if prevObj.Name != result.Item.Name {
// switch the object
pushToExpire()
}
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
versionsCount++
} else {
// switch the object
pushToExpire()
// the object switched without a latest version being seen; log it
batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name))
continue
}
if versionsCount <= matchedFilter.Purge.RetainVersions {
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result.Item,
})
pushToExpire()
case <-ctx.Done():
done = true
}
if done {
break
}
}
if context.Cause(ctx) != nil {
xioutil.SafeClose(expireCh)
return context.Cause(ctx)
}
pushToExpire()
// Send any remaining objects downstream
if len(toDel) > 0 {
select {

View File
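The reworked batch-expire loop streams versions per object, tallies delete markers by object name as they pass, and pushToExpire transfers the tally onto the trailing ExpireAll entry whenever the scan switches objects, letting job stats split expired versions from expired delete markers. A reduced sketch of the tallying step over simplified types:

package main

type versionInfo struct {
	Name         string
	DeleteMarker bool
}

// countDeleteMarkers tallies delete markers per object name, the same
// bookkeeping the expiry loop performs before flushing an entry.
func countDeleteMarkers(versions []versionInfo) map[string]int64 {
	counts := make(map[string]int64)
	for _, v := range versions {
		if v.DeleteMarker {
			counts[v.Name]++
		}
	}
	return counts
}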

@@ -39,7 +39,6 @@ import (
"github.com/lithammer/shortuuid/v4"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
@@ -47,7 +46,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
@@ -61,6 +59,8 @@ var globalBatchConfig batch.Config
const (
// Keep the completed/failed job stats 3 days before removing it
oldJobsExpiration = 3 * 24 * time.Hour
redactedText = "**REDACTED**"
)
// BatchJobRequest this is an internal data structure not for external consumption.
@@ -74,6 +74,29 @@ type BatchJobRequest struct {
ctx context.Context `msg:"-"`
}
// RedactSensitive will redact any sensitive information in b.
func (j *BatchJobRequest) RedactSensitive() {
j.Replicate.RedactSensitive()
j.Expire.RedactSensitive()
j.KeyRotate.RedactSensitive()
}
// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobReplicateV1) RedactSensitive() {
if r == nil {
return
}
if r.Target.Creds.SecretKey != "" {
r.Target.Creds.SecretKey = redactedText
}
if r.Target.Creds.SessionToken != "" {
r.Target.Creds.SessionToken = redactedText
}
}
// RedactSensitive will redact any sensitive information in b.
func (r *BatchJobKeyRotateV1) RedactSensitive() {}
func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token string) error {
if endpoint == "" {
return nil
@@ -117,7 +140,7 @@ func (r BatchJobReplicateV1) Notify(ctx context.Context, ri *batchJobInfo) error
}
// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local.
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name
@@ -164,7 +187,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
}
return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount)
}
gopts := miniogo.GetObjectOptions{
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
@@ -185,7 +208,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
return err
}
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name
@@ -226,7 +249,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
)
for i := 0; i < partsCount; i++ {
gopts := miniogo.GetObjectOptions{
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,
}
@@ -357,7 +380,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
cred := r.Source.Creds
c, err := miniogo.New(u.Host, &miniogo.Options{
c, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),
@@ -368,7 +391,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
}
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
core := &miniogo.Core{Client: c}
core := &minio.Core{Client: c}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {
@@ -389,14 +412,14 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
ctx, cancel := context.WithCancel(ctx)
objInfoCh := make(chan miniogo.ObjectInfo, 1)
objInfoCh := make(chan minio.ObjectInfo, 1)
go func() {
prefixes := r.Source.Prefix.F()
if len(prefixes) == 0 {
prefixes = []string{""}
}
for _, prefix := range prefixes {
prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, minio.ListObjectsOptions{
Prefix: prefix,
WithVersions: minioSrc,
Recursive: true,
@@ -419,7 +442,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
// all user metadata or just storageClass. If its only storageClass
// List() already returns relevant information for filter to be applied.
if isMetadata && !isStorageClassOnly {
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, minio.StatObjectOptions{})
if err == nil {
oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
} else {
@@ -515,7 +538,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
}
// toObjectInfo converts minio.ObjectInfo to ObjectInfo
func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo {
func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi := ObjectInfo{
Bucket: bucket,
@@ -597,12 +620,12 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
},
}
opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
opts, _, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
if err != nil {
batchLogIf(ctx, err)
continue
}
// TODO: I am not sure we read it back, but we aren't sending whether checksums are single/multipart.
for k, vals := range opts.Header() {
for _, v := range vals {
snowballObj.Headers.Add(k, v)
@@ -618,7 +641,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
}
// ReplicateToTarget read from source and replicate to configured target
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
tgtPrefix := r.Target.Prefix
@@ -627,9 +650,9 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() {
if retry && !s3Type {
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.StatObjectOptions{
VersionID: srcObjInfo.VersionID,
Internal: miniogo.AdvancedGetOptions{
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
}); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) {
@@ -646,19 +669,19 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
dmVersionID = ""
versionID = ""
}
return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{
return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
Internal: minio.AdvancedRemoveOptions{
ReplicationDeleteMarker: dmVersionID != "",
ReplicationMTime: srcObjInfo.ModTime,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationStatus: minio.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
}
if retry && !s3Type { // when we are retrying avoid copying if necessary.
gopts := miniogo.GetObjectOptions{}
gopts := minio.GetObjectOptions{}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
return err
}
@@ -687,14 +710,14 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
return err
}
putOpts, err := batchReplicationOpts(ctx, "", objInfo)
putOpts, isMP, err := batchReplicationOpts(ctx, "", objInfo)
if err != nil {
return err
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
putOpts.Internal = miniogo.AdvancedPutOptions{}
putOpts.Internal = minio.AdvancedPutOptions{}
}
if objInfo.isMultipart() {
if isMP {
if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil {
return err
}
@@ -858,21 +881,23 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
defer ri.mu.RUnlock()
return &batchJobInfo{
Version: ri.Version,
JobID: ri.JobID,
JobType: ri.JobType,
RetryAttempts: ri.RetryAttempts,
Complete: ri.Complete,
Failed: ri.Failed,
StartTime: ri.StartTime,
LastUpdate: ri.LastUpdate,
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Attempts: ri.Attempts,
Version: ri.Version,
JobID: ri.JobID,
JobType: ri.JobType,
RetryAttempts: ri.RetryAttempts,
Complete: ri.Complete,
Failed: ri.Failed,
StartTime: ri.StartTime,
LastUpdate: ri.LastUpdate,
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
ObjectsFailed: ri.ObjectsFailed,
DeleteMarkers: ri.DeleteMarkers,
DeleteMarkersFailed: ri.DeleteMarkersFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Attempts: ri.Attempts,
}
}
@@ -971,11 +996,22 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
// Note: to be used only with batch jobs that affect multiple versions through
// a single action. e.g batch-expire has an option to expire all versions of an
// object which matches the given filters.
func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectInfo, success bool) {
func (ri *batchJobInfo) trackMultipleObjectVersions(info expireObjInfo, success bool) {
if ri == nil {
return
}
ri.mu.Lock()
defer ri.mu.Unlock()
if success {
ri.Objects += int64(info.NumVersions)
ri.Bucket = info.Bucket
ri.Object = info.Name
ri.Objects += int64(info.NumVersions) - info.DeleteMarkerCount
ri.DeleteMarkers += info.DeleteMarkerCount
} else {
ri.ObjectsFailed += int64(info.NumVersions)
ri.ObjectsFailed += int64(info.NumVersions) - info.DeleteMarkerCount
ri.DeleteMarkersFailed += info.DeleteMarkerCount
}
}
@@ -1099,7 +1135,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
// if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest)
isSourceOrTargetS3 := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
return !isSourceOrTargetS3 || info.IsLatest
}
u, err := url.Parse(r.Target.Endpoint)
@@ -1109,7 +1146,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
cred := r.Target.Creds
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
c, err := minio.NewCore(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),
@@ -1132,14 +1169,14 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
go func() {
// Snowball currently needs the high level minio-go Client, not the Core one
cl, err := miniogo.New(u.Host, &miniogo.Options{
cl, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
batchLogOnceIf(ctx, err, job.ID+"miniogo.New")
batchLogOnceIf(ctx, err, job.ID+"minio.New")
return
}
@@ -1249,7 +1286,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts)
success := true
if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
// pre-condition failed means we already have the object copied over.
return
}
@@ -1425,7 +1462,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
cred = r.Source.Creds
remoteBkt = r.Source.Bucket
pathStyle = r.Source.Path
}
u, err := url.Parse(remoteEp)
@@ -1433,7 +1469,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
return err
}
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
c, err := minio.NewCore(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),
@@ -1446,7 +1482,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
vcfg, err := c.GetBucketVersioning(ctx, remoteBkt)
if err != nil {
if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return batchReplicationJobError{
Code: "NoSuchTargetBucket",
Description: "The specified target bucket does not exist",
@@ -1551,19 +1587,19 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string
return err
}
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) {
// TODO: support custom storage class for remote replication
putOpts, err = putReplicationOpts(ctx, "", objInfo, 0)
putOpts, isMP, err = putReplicationOpts(ctx, "", objInfo)
if err != nil {
return putOpts, err
return putOpts, isMP, err
}
putOpts.Internal = miniogo.AdvancedPutOptions{
putOpts.Internal = minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
ReplicationRequest: true,
}
return putOpts, nil
return putOpts, isMP, nil
}
// ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType}
@@ -1695,6 +1731,8 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
return
}
// Remove sensitive fields.
req.RedactSensitive()
buf, err := yaml.Marshal(req)
if err != nil {
batchLogIf(ctx, err)
@@ -1714,7 +1752,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
return
}
buf, err := io.ReadAll(ioutil.HardLimitReader(r.Body, humanize.MiByte*4))
buf, err := io.ReadAll(xioutil.HardLimitReader(r.Body, humanize.MiByte*4))
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -1802,7 +1840,7 @@ func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request)
return
}
if _, success := proxyRequestByToken(ctx, w, r, jobID); success {
if _, proxied, _ := proxyRequestByToken(ctx, w, r, jobID, true); proxied {
return
}
@@ -2109,12 +2147,14 @@ func (ri *batchJobInfo) metric() madmin.JobMetric {
switch ri.JobType {
case string(madmin.BatchJobReplicate):
m.Replicate = &madmin.ReplicateInfo{
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
DeleteMarkers: ri.DeleteMarkers,
ObjectsFailed: ri.ObjectsFailed,
DeleteMarkersFailed: ri.DeleteMarkersFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
}
case string(madmin.BatchJobKeyRotate):
m.KeyRotate = &madmin.KeyRotationInfo{
@@ -2125,10 +2165,12 @@ func (ri *batchJobInfo) metric() madmin.JobMetric {
}
case string(madmin.BatchJobExpire):
m.Expired = &madmin.ExpirationInfo{
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
ObjectsFailed: ri.ObjectsFailed,
Bucket: ri.Bucket,
Object: ri.Object,
Objects: ri.Objects,
DeleteMarkers: ri.DeleteMarkers,
ObjectsFailed: ri.ObjectsFailed,
DeleteMarkersFailed: ri.DeleteMarkersFailed,
}
}
@@ -2274,16 +2316,15 @@ func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func
}
}
func lookupStyle(s string) miniogo.BucketLookupType {
var lookup miniogo.BucketLookupType
func lookupStyle(s string) minio.BucketLookupType {
var lookup minio.BucketLookupType
switch s {
case "on":
lookup = miniogo.BucketLookupPath
lookup = minio.BucketLookupPath
case "off":
lookup = miniogo.BucketLookupDNS
lookup = minio.BucketLookupDNS
default:
lookup = miniogo.BucketLookupAuto
lookup = minio.BucketLookupAuto
}
return lookup
}

View File
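RedactSensitive is written defensively: a nil receiver is a no-op and only non-empty secrets are overwritten, so DescribeBatchJob can call it unconditionally before marshaling the job back to the caller. A condensed sketch under those assumptions, with the job and credential types simplified:

package main

const redactedText = "**REDACTED**"

type creds struct {
	SecretKey, SessionToken string
}

type replicateJob struct {
	Target struct{ Creds creds }
}

// RedactSensitive blanks secret material in place; safe on a nil job.
func (r *replicateJob) RedactSensitive() {
	if r == nil {
		return
	}
	if r.Target.Creds.SecretKey != "" {
		r.Target.Creds.SecretKey = redactedText
	}
	if r.Target.Creds.SessionToken != "" {
		r.Target.Creds.SessionToken = redactedText
	}
}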

@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -54,7 +54,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex := getMD5Hash(textPartData)
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j,
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
@@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(b.Context())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{})
err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{})
if err != nil {
b.Fatal(err)
}
@@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)

View File

@@ -128,14 +128,20 @@ func closeBitrotReaders(rs []io.ReaderAt) {
}
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if w != nil {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
func closeBitrotWriters(ws []io.Writer) []error {
errs := make([]error, len(ws))
for i, w := range ws {
if w == nil {
errs[i] = errDiskNotFound
continue
}
if bw, ok := w.(io.Closer); ok {
errs[i] = bw.Close()
} else {
errs[i] = nil
}
}
return errs
}
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
@@ -178,7 +184,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
return errFileCorrupt
}
bufp := xioutil.ODirectPoolSmall.Get().(*[]byte)
bufp := xioutil.ODirectPoolSmall.Get()
defer xioutil.ODirectPoolSmall.Put(bufp)
for left > 0 {

View File
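closeBitrotWriters now reports a positional error slice instead of closing silently, mapping nil writers to errDiskNotFound so errs[i] always corresponds to drive i; the buffer pool access also becomes strongly typed, dropping the *[]byte assertion. A sketch of the same close-all contract over plain io.Writer values:

package main

import (
	"errors"
	"io"
)

var errDiskNotFound = errors.New("disk not found")

// closeAll closes each writer and reports errors positionally, so a
// caller can correlate errs[i] with the drive at index i. Writers that
// are not io.Closers leave a nil error behind, matching the diff.
func closeAll(ws []io.Writer) []error {
	errs := make([]error, len(ws))
	for i, w := range ws {
		if w == nil {
			errs[i] = errDiskNotFound
			continue
		}
		if c, ok := w.(io.Closer); ok {
			errs[i] = c.Close()
		}
	}
	return errs
}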

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"io"
"testing"
)
@@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
t.Fatal(err)
}
disk.MakeVol(context.Background(), volume)
disk.MakeVol(t.Context(), volume)
writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)

View File

@@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit)
bs.mu.RLock()
for _, i := range bs.info {
traceInfo = append(traceInfo, i)
}
traceInfo = append(traceInfo, bs.info...)
bs.mu.RUnlock()
return traceInfo

View File

@@ -344,11 +344,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(bucketsInfo, func(i, j int) bool {
return bucketsInfo[i].Name < bucketsInfo[j].Name
})
} else {
// Invoke the list buckets.
var err error
@@ -561,7 +559,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}, goi, opts, gerr)
if dsc.ReplicateAny() {
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
object.VersionPurgeStatus = replication.VersionPurgePending
object.VersionPurgeStatuses = dsc.PendingStatus()
} else {
object.DeleteMarkerReplicationStatus = dsc.PendingStatus()
@@ -671,7 +669,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
continue
}
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) {
if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == replication.VersionPurgePending) {
// copy so we can re-add null ID.
dobj := dobj
if isDirObject(dobj.ObjectName) && dobj.VersionID == "" {
@@ -841,7 +839,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
apiErr := ErrBucketAlreadyExists
if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() {

View File

@@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
// Test for Anonymous/unsigned http request.
@@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.

View File

@@ -26,7 +26,7 @@ import (
//go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
type lcEventSrc uint8
//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins
const (
lcEventSrc_None lcEventSrc = iota
lcEventSrc_Heal

View File

@@ -73,6 +73,10 @@ func NewLifecycleSys() *LifecycleSys {
func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string, metadata map[string]string, err string) madmin.TraceInfo {
sz, _ := oi.GetActualSize()
if metadata == nil {
metadata = make(map[string]string)
}
metadata["version-id"] = oi.VersionID
return madmin.TraceInfo{
TraceType: madmin.TraceILM,
Time: startTime,
@@ -151,8 +155,8 @@ func (f freeVersionTask) OpHash() uint64 {
return xxh3.HashString(f.TransitionedObject.Tier + f.TransitionedObject.Name)
}
func (n newerNoncurrentTask) OpHash() uint64 {
return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName)
func (n noncurrentVersionsTask) OpHash() uint64 {
return xxh3.HashString(n.bucket + n.versions[0].ObjectName)
}
func (j jentry) OpHash() uint64 {
@@ -236,14 +240,16 @@ func (es *expiryState) enqueueByDays(oi ObjectInfo, event lifecycle.Event, src l
}
}
// enqueueByNewerNoncurrent enqueues object versions expired by
// NewerNoncurrentVersions limit for expiry.
func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete, lcEvent lifecycle.Event) {
func (es *expiryState) enqueueNoncurrentVersions(bucket string, versions []ObjectToDelete, events []lifecycle.Event) {
if len(versions) == 0 {
return
}
task := newerNoncurrentTask{bucket: bucket, versions: versions, event: lcEvent}
task := noncurrentVersionsTask{
bucket: bucket,
versions: versions,
events: events,
}
wrkr := es.getWorkerCh(task.OpHash())
if wrkr == nil {
es.stats.missedExpiryTasks.Add(1)
@@ -343,8 +349,8 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
} else {
applyExpiryOnNonTransitionedObjects(es.ctx, es.objAPI, v.objInfo, v.event, v.src)
}
case newerNoncurrentTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
case noncurrentVersionsTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.events)
case jentry:
transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
case freeVersionTask:
@@ -392,12 +398,10 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState(ctx, objectAPI, globalILMConfig.getExpirationWorkers())
}
// newerNoncurrentTask encapsulates arguments required by worker to expire objects
// by NewerNoncurrentVersions
type newerNoncurrentTask struct {
type noncurrentVersionsTask struct {
bucket string
versions []ObjectToDelete
event lifecycle.Event
events []lifecycle.Event
}
type transitionTask struct {
@@ -1104,17 +1108,20 @@ func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) {
// ToLifecycleOpts returns lifecycle.ObjectOpts value for oi.
func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts {
return lifecycle.ObjectOpts{
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
Size: oi.Size,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
Name: oi.Name,
UserTags: oi.UserTags,
VersionID: oi.VersionID,
ModTime: oi.ModTime,
Size: oi.Size,
IsLatest: oi.IsLatest,
NumVersions: oi.NumVersions,
DeleteMarker: oi.DeleteMarker,
SuccessorModTime: oi.SuccessorModTime,
RestoreOngoing: oi.RestoreOngoing,
RestoreExpires: oi.RestoreExpires,
TransitionStatus: oi.TransitionedObject.Status,
UserDefined: oi.UserDefined,
VersionPurgeStatus: oi.VersionPurgeStatus,
ReplicationStatus: oi.ReplicationStatus,
}
}

View File
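The ilmTrace fix allocates the metadata map when it arrives nil, since assigning to a nil map panics at runtime while reads merely return zero values. A minimal demonstration:

package main

import "fmt"

func main() {
	var metadata map[string]string // nil map: reads are fine, writes panic

	if metadata == nil {
		metadata = make(map[string]string)
	}
	metadata["version-id"] = "0f1e2d3c" // hypothetical value; safe now
	fmt.Println(metadata["version-id"])
}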

@@ -243,26 +243,26 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) {
return subToken, nodeIndex
}
func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) {
subToken, nodeIndex := parseRequestToken(token)
if nodeIndex >= 0 {
return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex)
func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string, returnErr bool) (subToken string, proxied bool, success bool) {
var nodeIndex int
if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
}
return subToken, false
return
}
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) {
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
if len(globalProxyEndpoints) == 0 {
return false
return
}
if index < 0 || index >= len(globalProxyEndpoints) {
return false
return
}
ep := globalProxyEndpoints[index]
if ep.IsLocal {
return false
return
}
return proxyRequest(ctx, w, r, ep)
return true, proxyRequest(ctx, w, r, ep, returnErr)
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.

View File
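proxyRequestByToken now separates two outcomes that the old boolean conflated: proxied says the request was handed to a peer at all, success says that peer call went through, and the new returnErr flag controls whether proxy failures surface to the client. Callers such as CancelBatchJob stop local handling as soon as proxied is true. A schematic of the calling pattern; the bodies below are placeholders, not the diff's implementation:

package main

import "net/http"

func proxyRequestByToken(w http.ResponseWriter, r *http.Request, token string, returnErr bool) (subToken string, proxied, success bool) {
	// Parse the node index out of the token, then forward the request
	// when that index refers to a remote peer (elided in this sketch).
	return token, false, false
}

func cancelHandler(w http.ResponseWriter, r *http.Request, jobID string) {
	if _, proxied, _ := proxyRequestByToken(w, r, jobID, true); proxied {
		return // a peer owns this job and has already answered
	}
	// ... handle the cancellation locally ...
}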

@@ -647,9 +647,7 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) {
// Reset the state of the BucketMetadataSys.
func (sys *BucketMetadataSys) Reset() {
sys.Lock()
for k := range sys.metadataMap {
delete(sys.metadataMap, k)
}
clear(sys.metadataMap)
sys.Unlock()
}

View File
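Reset now uses the clear builtin (Go 1.21+), which removes every entry from a map in place, replacing the delete-in-a-range loop without reallocating the map. For instance:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	clear(m)                      // removes all entries; the map stays non-nil
	fmt.Println(len(m), m == nil) // 0 false
}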

@@ -555,7 +555,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, key, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
if err != nil {
return output, metabytes, err

View File

@@ -295,7 +295,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
}
@@ -305,7 +305,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
if err != nil && !(replica && rMode == "" && rDate.IsZero()) {
if err != nil && (!replica || rMode != "" || !rDate.IsZero()) {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if retentionPermErr != ErrNone {

View File
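Two distinct fixes land in this hunk: the legal-hold branch previously returned the outer err instead of the freshly assigned lerr, masking the actual parse failure, and the retention guard is rewritten via De Morgan's law, since !(a && b && c) is equivalent to !a || !b || !c, so behavior is unchanged while the negated conjunction disappears. The equivalence, checked mechanically over all inputs:

package main

import "fmt"

func main() {
	for _, replica := range []bool{false, true} {
		for _, modeEmpty := range []bool{false, true} {
			for _, dateZero := range []bool{false, true} {
				before := !(replica && modeEmpty && dateZero)
				after := !replica || !modeEmpty || !dateZero
				fmt.Println(before == after) // true in all eight cases
			}
		}
	}
}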

@@ -112,7 +112,6 @@ func (r *ReplicationStats) collectWorkerMetrics(ctx context.Context) {
r.wlock.Lock()
r.workers.update()
r.wlock.Unlock()
}
}
}

View File

@@ -125,16 +125,16 @@ func (ri replicatedInfos) VersionPurgeStatus() VersionPurgeStatusType {
completed := 0
for _, v := range ri.Targets {
switch v.VersionPurgeStatus {
case Failed:
return Failed
case Complete:
case replication.VersionPurgeFailed:
return replication.VersionPurgeFailed
case replication.VersionPurgeComplete:
completed++
}
}
if completed == len(ri.Targets) {
return Complete
return replication.VersionPurgeComplete
}
return Pending
return replication.VersionPurgePending
}
func (ri replicatedInfos) VersionPurgeStatusInternal() string {
@@ -380,7 +380,7 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT
// CompositeVersionPurgeStatus returns overall replication purge status for the permanent delete being replicated.
func (rs *ReplicationState) CompositeVersionPurgeStatus() VersionPurgeStatusType {
switch VersionPurgeStatusType(rs.VersionPurgeStatusInternal) {
case Pending, Complete, Failed: // for backward compatibility
case replication.VersionPurgePending, replication.VersionPurgeComplete, replication.VersionPurgeFailed: // for backward compatibility
return VersionPurgeStatusType(rs.VersionPurgeStatusInternal)
default:
return getCompositeVersionPurgeStatus(rs.PurgeTargets)
@@ -478,16 +478,16 @@ func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) Version
completed := 0
for _, v := range m {
switch v {
case Failed:
return Failed
case Complete:
case replication.VersionPurgeFailed:
return replication.VersionPurgeFailed
case replication.VersionPurgeComplete:
completed++
}
}
if completed == len(m) {
return Complete
return replication.VersionPurgeComplete
}
return Pending
return replication.VersionPurgePending
}
// getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo
@@ -635,28 +635,7 @@ type ResyncTarget struct {
}
// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication
type VersionPurgeStatusType string
const (
// Pending - versioned delete replication is pending.
Pending VersionPurgeStatusType = "PENDING"
// Complete - versioned delete replication is now complete, erase version on disk.
Complete VersionPurgeStatusType = "COMPLETE"
// Failed - versioned delete replication failed.
Failed VersionPurgeStatusType = "FAILED"
)
// Empty returns true if purge status was not set.
func (v VersionPurgeStatusType) Empty() bool {
return string(v) == ""
}
// Pending returns true if the version is pending purge.
func (v VersionPurgeStatusType) Pending() bool {
return v == Pending || v == Failed
}
type VersionPurgeStatusType = replication.VersionPurgeStatusType
type replicationResyncer struct {
// map of bucket to their resync status

View File
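The local VersionPurgeStatusType definition and its Pending/Complete/Failed constants move into the replication package; what remains is a type alias (note the =), so both names denote the identical type and existing call sites keep compiling unchanged. The alias-versus-definition distinction in brief:

package main

type status string

// Definition: a new, distinct type; conversion between the two is explicit.
type StatusDefined status

// Alias: the same type under a second name; values are interchangeable.
type StatusAlias = status

func main() {
	var s status = "PENDING"
	var a StatusAlias = s // no conversion needed: identical types
	d := StatusDefined(s) // explicit conversion required
	_, _ = a, d
}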

@@ -915,33 +915,29 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
{
var zb0004 string
zb0004, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
za0004 = VersionPurgeStatusType(zb0004)
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
z.PurgeTargets[za0003] = za0004
}
case "ResetStatusesMap":
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0005)
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0005 > 0 {
zb0005--
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 string
za0005, err = dc.ReadString()
@@ -1078,7 +1074,7 @@ func (z *ReplicationState) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
err = en.WriteString(string(za0004))
err = za0004.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
@@ -1154,7 +1150,11 @@ func (z *ReplicationState) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendMapHeader(o, uint32(len(z.PurgeTargets)))
for za0003, za0004 := range z.PurgeTargets {
o = msgp.AppendString(o, za0003)
o = msgp.AppendString(o, string(za0004))
o, err = za0004.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
}
// string "ResetStatusesMap"
o = append(o, 0xb0, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x4d, 0x61, 0x70)
@@ -1279,35 +1279,31 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "PurgeTargets")
return
}
{
var zb0004 string
zb0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
za0004 = VersionPurgeStatusType(zb0004)
bts, err = za0004.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
return
}
z.PurgeTargets[za0003] = za0004
}
case "ResetStatusesMap":
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0005)
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0005 > 0 {
for zb0004 > 0 {
var za0005 string
var za0006 string
zb0005--
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
@@ -1345,7 +1341,7 @@ func (z *ReplicationState) Msgsize() (s int) {
if z.PurgeTargets != nil {
for za0003, za0004 := range z.PurgeTargets {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(string(za0004))
s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize()
}
}
s += 17 + msgp.MapHeaderSize
@@ -2507,55 +2503,3 @@ func (z *TargetReplicationResyncStatus) Msgsize() (s int) {
s = 1 + 3 + msgp.TimeSize + 4 + msgp.TimeSize + 3 + msgp.StringPrefixSize + len(z.ResyncID) + 4 + msgp.TimeSize + 4 + msgp.IntSize + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len(z.Bucket) + 4 + msgp.StringPrefixSize + len(z.Object)
return
}
// DecodeMsg implements msgp.Decodable
func (z *VersionPurgeStatusType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 string
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = VersionPurgeStatusType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z VersionPurgeStatusType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteString(string(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z VersionPurgeStatusType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *VersionPurgeStatusType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = VersionPurgeStatusType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z VersionPurgeStatusType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"testing"
"github.com/minio/minio/internal/bucket/replication"
@@ -184,7 +183,7 @@ var parseReplicationDecisionTest = []struct {
func TestParseReplicateDecision(t *testing.T) {
for i, test := range parseReplicationDecisionTest {
dsc, err := parseReplicateDecision(context.Background(), "bucket", test.expDsc.String())
dsc, err := parseReplicateDecision(t.Context(), "bucket", test.expDsc.String())
if err != nil {
if test.expErr != err {
t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr)

View File

@@ -49,6 +49,7 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/once"
"github.com/tinylib/msgp/msgp"
@@ -145,7 +146,7 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re
if errInt == nil {
err = nil
} else {
err = errInt.(error)
err, _ = errInt.(error)
}
}
switch err.(type) {
@@ -389,7 +390,7 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
// it can be the case that the other cluster is down and a duplicate `mc rm --vid`
// is issued - this still needs to be replicated back to the other target
if !oi.VersionPurgeStatus.Empty() {
replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
replicate = oi.VersionPurgeStatus == replication.VersionPurgePending || oi.VersionPurgeStatus == replication.VersionPurgeFailed
dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
}
continue
@@ -617,7 +618,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
return
}
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == Complete {
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete {
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@@ -637,7 +638,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Failed
} else {
rinfo.VersionPurgeStatus = Failed
rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
}
return
}
@@ -661,7 +662,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
case isErrObjectNotFound(serr), isErrVersionNotFound(serr):
// version being purged is already not found on target.
if !rinfo.VersionPurgeStatus.Empty() {
rinfo.VersionPurgeStatus = Complete
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
return
}
case isErrReadQuorum(serr), isErrWriteQuorum(serr):
@@ -694,7 +695,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Failed
} else {
rinfo.VersionPurgeStatus = Failed
rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
}
replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@@ -704,7 +705,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" {
rinfo.ReplicationStatus = replication.Completed
} else {
rinfo.VersionPurgeStatus = Complete
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
}
}
return
@@ -773,7 +774,7 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
return "", false
}
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, partNum int) (putOpts minio.PutObjectOptions, err error) {
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) {
meta := make(map[string]string)
isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined)
@@ -794,23 +795,22 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
meta[k] = v
}
}
isMP = objInfo.isMultipart()
if len(objInfo.Checksum) > 0 {
// Add encrypted CRC to metadata for SSE-C objects.
if isSSEC {
meta[ReplicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(objInfo.Checksum)
} else {
if objInfo.isMultipart() && partNum > 0 {
for _, pi := range objInfo.Parts {
if pi.Number == partNum {
for k, v := range pi.Checksums { // for PutObjectPart
meta[k] = v
}
}
}
} else {
for k, v := range getCRCMeta(objInfo, 0, nil) { // for PutObject/NewMultipartUpload
meta[k] = v
}
cs, mp := getCRCMeta(objInfo, 0, nil)
// Set object checksum.
for k, v := range cs {
meta[k] = v
}
isMP = mp
if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
// For objects where checksum is full object, it will be the same.
// Therefore, we use the cheaper PutObject replication.
isMP = false
}
}
}
@@ -841,7 +841,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
if tagTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok {
tagTimestamp, err = time.Parse(time.RFC3339Nano, tagTmstampStr)
if err != nil {
return putOpts, err
return putOpts, false, err
}
}
putOpts.Internal.TaggingTimestamp = tagTimestamp
@@ -865,7 +865,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := amztime.ISO8601Parse(retainDateStr)
if err != nil {
return putOpts, err
return putOpts, false, err
}
putOpts.RetainUntilDate = rdate
// set retention timestamp in opts
@@ -873,7 +873,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
if retainTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok {
retTimestamp, err = time.Parse(time.RFC3339Nano, retainTmstampStr)
if err != nil {
return putOpts, err
return putOpts, false, err
}
}
putOpts.Internal.RetentionTimestamp = retTimestamp
@@ -885,7 +885,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
if lholdTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
lholdTimestamp, err = time.Parse(time.RFC3339Nano, lholdTmstampStr)
if err != nil {
return putOpts, err
return putOpts, false, err
}
}
putOpts.Internal.LegalholdTimestamp = lholdTimestamp
@@ -895,9 +895,19 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo, part
}
if crypto.S3KMS.IsEncrypted(objInfo.UserDefined) {
sseEnc, err := encrypt.NewSSEKMS(objInfo.KMSKeyID(), nil)
// If KMS key ID replication is enabled (as it is by default)
// we include the object's KMS key ID. In any case, we
// always set the SSE-KMS header. If no KMS key ID is
// specified, MinIO is supposed to use whatever default
// config applies on the site or bucket.
var keyID string
if kms.ReplicateKeyID() {
keyID = objInfo.KMSKeyID()
}
sseEnc, err := encrypt.NewSSEKMS(keyID, nil)
if err != nil {
return putOpts, err
return putOpts, false, err
}
putOpts.ServerSideEncryption = sseEnc
}
@@ -958,7 +968,11 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
}
t, _ := tags.ParseObjectTags(oi1.UserTags)
if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2.UserTags, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
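// Copy into a freshly allocated map first: reflect.DeepEqual treats a nil map
// and an empty map as unequal, so normalizing oi2.UserTags here avoids a
// spurious metadata-replication trigger (a likely motivation for this change).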
oi2Map := make(map[string]string)
for k, v := range oi2.UserTags {
oi2Map[k] = v
}
if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
return replicateMetadata
}
@@ -1286,7 +1300,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
// use core client to avoid doing multipart on PUT
c := &minio.Core{Client: tgt.Client}
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{
@@ -1318,7 +1332,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
defer cancel()
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if isMP {
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
@@ -1447,13 +1461,14 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
rinfo.Duration = time.Since(startTime)
}()
oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, minio.StatObjectOptions{
sOpts := minio.StatObjectOptions{
VersionID: objInfo.VersionID,
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
}
sOpts.Set(xhttp.AmzTagDirective, "ACCESS")
oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, sOpts)
if cerr == nil {
rAction = getReplicationAction(objInfo, oi, ri.OpType)
rinfo.ReplicationStatus = replication.Completed
@@ -1538,19 +1553,30 @@ applyAction:
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
}
if tagTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok {
// default timestamps to ModTime unless present in metadata
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if _, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
dstOpts.Internal.LegalholdTimestamp = objInfo.ModTime
}
if _, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
dstOpts.Internal.RetentionTimestamp = objInfo.ModTime
}
if objInfo.UserTags != "" {
dstOpts.Internal.TaggingTimestamp = objInfo.ModTime
}
if tagTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + TaggingTimestamp); ok {
ondiskTimestamp, err := time.Parse(time.RFC3339, tagTmStr)
if err == nil {
dstOpts.Internal.TaggingTimestamp = ondiskTimestamp
}
}
if retTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok {
if retTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockRetentionTimestamp); ok {
ondiskTimestamp, err := time.Parse(time.RFC3339, retTmStr)
if err == nil {
dstOpts.Internal.RetentionTimestamp = ondiskTimestamp
}
}
if lholdTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
if lholdTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockLegalHoldTimestamp); ok {
ondiskTimestamp, err := time.Parse(time.RFC3339, lholdTmStr)
if err == nil {
dstOpts.Internal.LegalholdTimestamp = ondiskTimestamp
@@ -1561,8 +1587,7 @@ applyAction:
replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
} else {
var putOpts minio.PutObjectOptions
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo, 0)
putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
sendEvent(eventArgs{
@@ -1593,7 +1618,7 @@ applyAction:
defer cancel()
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if isMP {
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
@@ -1671,7 +1696,8 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
cHeader := http.Header{}
cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
if !isSSEC {
for k, v := range getCRCMeta(objInfo, partInfo.Number, nil) {
cs, _ := getCRCMeta(objInfo, partInfo.Number, nil)
for k, v := range cs {
cHeader.Add(k, v)
}
}
@@ -1716,8 +1742,9 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
defer ccancel()
if len(objInfo.Checksum) > 0 {
for k, v := range getCRCMeta(objInfo, 0, nil) {
userMeta[k] = v
cs, _ := getCRCMeta(objInfo, 0, nil)
for k, v := range cs {
userMeta[k] = strings.Split(v, "-")[0]
}
}
_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
@@ -3336,7 +3363,7 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
}
for arn, st := range roi.TargetPurgeStatuses {
if opts.ARN == "" || opts.ARN == arn {
if !opts.Verbose && st == Complete {
if !opts.Verbose && st == replication.VersionPurgeComplete {
continue
}
t, ok := tgtsMap[arn]
@@ -3435,7 +3462,7 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
// heal delete marker replication failure or versioned delete replication failure
if roi.ReplicationStatus == replication.Pending ||
roi.ReplicationStatus == replication.Failed ||
roi.VersionPurgeStatus == Failed || roi.VersionPurgeStatus == Pending {
roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending {
globalReplicationPool.Get().queueReplicaDeleteTask(dv)
return
}
@@ -3723,7 +3750,7 @@ func (p *ReplicationPool) queueMRFHeal() error {
}
func (p *ReplicationPool) initialized() bool {
return !(p == nil || p.objLayer == nil)
return p != nil && p.objLayer != nil
}
// getMRF returns MRF entries for this node.
@@ -3766,9 +3793,9 @@ type validateReplicationDestinationOptions struct {
checkReadyErr sync.Map
}
func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) map[string]string {
func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string, isMP bool) {
meta := make(map[string]string)
cs := oi.decryptChecksums(partNum, h)
cs, isMP = oi.decryptChecksums(partNum, h)
for k, v := range cs {
cksum := hash.NewChecksumString(k, v)
if cksum == nil {
@@ -3776,8 +3803,9 @@ func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) map[string]string {
}
if cksum.Valid() {
meta[cksum.Type.Key()] = v
meta[xhttp.AmzChecksumType] = cs[xhttp.AmzChecksumType]
meta[xhttp.AmzChecksumAlgo] = cksum.Type.String()
}
meta[xhttp.AmzChecksumType] = cksum.Type.ObjType()
}
return meta
return meta, isMP
}
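A short usage sketch mirroring the call sites above: the new second return value tells the replication path whether the checksum set is multipart, i.e. whether part-by-part replication is needed to preserve per-part checksums (names as in the diff):

cs, isMP := getCRCMeta(objInfo, 0, nil) // part 0: whole-object checksum set
for k, v := range cs {
meta[k] = v
}
if isMP {
// replicate part by part so per-part checksums survive
} else {
// a single PutObject carries the full-object checksum
}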


@@ -18,7 +18,6 @@
package cmd
import (
"context"
"fmt"
"net/http"
"testing"
@@ -86,7 +85,7 @@ var replicationConfigTests = []struct {
}
func TestReplicationResync(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
for i, test := range replicationConfigTests {
if sync := test.rcfg.Resync(ctx, test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync {
t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync)


@@ -47,6 +47,7 @@ import (
"github.com/minio/console/api/operations"
consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
consoleCerts "github.com/minio/console/pkg/certs"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/set"
@@ -382,7 +383,7 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
}
// Check "no-compat" flag from command line argument.
ctxt.StrictS3Compat = !(ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat"))
ctxt.StrictS3Compat = !ctx.IsSet("no-compat") && !ctx.GlobalIsSet("no-compat")
switch {
case ctx.IsSet("config-dir"):
@@ -717,9 +718,7 @@ func serverHandleEnvVars() {
logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable")
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint((*url.URL)(u)) {
err := fmt.Errorf("URL contains unexpected resources, expected URL to be one of http(s)://console.example.com or as a subpath via API endpoint http(s)://minio.example.com/minio format: %v", u)
logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value is environment variable")
}
@@ -734,9 +733,7 @@ func serverHandleEnvVars() {
logger.Fatal(err, "Invalid MINIO_SERVER_URL value in environment variable")
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint((*url.URL)(u)) {
err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u)
logger.Fatal(err, "Invalid MINIO_SERVER_URL value is environment variable")
}
@@ -835,55 +832,83 @@ func serverHandleEnvVars() {
globalEnableSyncBoot = env.Get("MINIO_SYNC_BOOT", config.EnableOff) == config.EnableOn
}
func loadRootCredentials() {
func loadRootCredentials() auth.Credentials {
// At this point, either both environment variables
// are defined or both are not defined.
// Check both cases and authenticate them if correctly defined
var user, password string
var hasCredentials bool
var legacyCredentials bool
//nolint:gocritic
if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
user = env.Get(config.EnvRootUser, "")
password = env.Get(config.EnvRootPassword, "")
hasCredentials = true
} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
user = env.Get(config.EnvAccessKey, "")
password = env.Get(config.EnvSecretKey, "")
legacyCredentials = true
hasCredentials = true
} else if globalServerCtxt.RootUser != "" && globalServerCtxt.RootPwd != "" {
user, password = globalServerCtxt.RootUser, globalServerCtxt.RootPwd
hasCredentials = true
}
if hasCredentials {
cred, err := auth.CreateCredentials(user, password)
if err != nil {
if legacyCredentials {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")
} else {
logger.Fatal(config.ErrInvalidRootUserCredentials(err),
"Unable to validate credentials inherited from the shell environment")
}
if user == "" || password == "" {
return auth.Credentials{}
}
cred, err := auth.CreateCredentials(user, password)
if err != nil {
if legacyCredentials {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")
} else {
logger.Fatal(config.ErrInvalidRootUserCredentials(err),
"Unable to validate credentials inherited from the shell environment")
}
if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
" Please use %s and %s",
config.EnvAccessKey, config.EnvSecretKey,
config.EnvRootUser, config.EnvRootPassword)
logger.Info(color.RedBold(msg))
}
globalActiveCred = cred
globalCredViaEnv = true
} else {
globalActiveCred = auth.DefaultCredentials
}
if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
" Please use %s and %s",
config.EnvAccessKey, config.EnvSecretKey,
config.EnvRootUser, config.EnvRootPassword)
logger.Info(color.RedBold(msg))
}
globalCredViaEnv = true
return cred
}
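After this refactor loadRootCredentials only reports what it found; the fallback to defaults moves to the caller. A hedged sketch of the expected caller pattern (hypothetical wiring, the real call site lives in the server bootstrap):

cred := loadRootCredentials()
if !cred.IsValid() {
cred = auth.DefaultCredentials // nothing supplied via env or CLI
}
globalActiveCred = cred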
// autoGenerateRootCredentials generates root credentials deterministically if
// a KMS is configured, no manual credentials have been specified and if root
// access is disabled.
func autoGenerateRootCredentials() auth.Credentials {
if GlobalKMS == nil {
return globalActiveCred
}
var err error
globalNodeAuthToken, err = authenticateNode(globalActiveCred.AccessKey, globalActiveCred.SecretKey)
aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
if IsErrIgnored(err, kes.ErrNotAllowed, kms.ErrNotSupported, errors.ErrUnsupported, kms.ErrPermission) {
// If we don't have permission to compute the HMAC, don't change the cred.
return globalActiveCred
}
if err != nil {
logger.Fatal(err, "Unable to generate internode credentials")
logger.Fatal(err, "Unable to generate root access key using KMS")
}
sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
logger.Info("Automatically generated root access key and secret key with the KMS")
return auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
}
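The derivation above is deterministic: the KMS MACs two fixed messages with its master key, and those MACs seed the credential generators, so every node pointed at the same KMS key derives identical root credentials. A standalone sketch of the same idea, with a local HMAC standing in for GlobalKMS.MAC (the stand-in key and messages are illustrative only):

package main

import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"fmt"

"github.com/minio/minio/internal/auth"
)

func main() {
masterKey := []byte("kms-master-key") // hypothetical stand-in for the KMS-held key
mac := func(msg string) []byte {
h := hmac.New(sha256.New, masterKey)
h.Write([]byte(msg))
return h.Sum(nil)
}
access, _ := auth.GenerateAccessKey(20, bytes.NewReader(mac("root access key")))
secret, _ := auth.GenerateSecretKey(32, bytes.NewReader(mac("root secret key")))
fmt.Println(access, secret) // identical on every node sharing the master key
}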
@@ -915,7 +940,7 @@ func handleKMSConfig() {
}
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
if !isFile(getPublicCertFile()) || !isFile(getPrivateKeyFile()) {
return nil, nil, false, nil
}


@@ -45,7 +45,7 @@ func Test_readFromSecret(t *testing.T) {
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp("", "testfile")
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {
t.Error(err)
}
@@ -157,7 +157,7 @@ MINIO_ROOT_PASSWORD=minio123`,
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp("", "testfile")
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {
t.Error(err)
}


@@ -18,17 +18,13 @@
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"sync"
"github.com/minio/kms-go/kes"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config/browser"
"github.com/minio/minio/internal/kms"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
@@ -570,7 +566,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
}
globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
case config.CompressionSubSys:
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
@@ -729,47 +724,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
return nil
}
// autoGenerateRootCredentials generates root credentials deterministically if
// a KMS is configured, no manual credentials have been specified and if root
// access is disabled.
func autoGenerateRootCredentials() {
if GlobalKMS == nil {
return
}
if globalAPIConfig.permitRootAccess() || !globalActiveCred.Equal(auth.DefaultCredentials) {
return
}
aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
return // If we don't have permission to compute the HMAC, don't change the cred.
}
if err != nil {
logger.Fatal(err, "Unable to generate root access key using KMS")
}
sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
logger.Info("Automatically generated root access key and secret key with the KMS")
globalActiveCred = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
}
// applyDynamicConfig will apply dynamic config values.
// Dynamic systems should be in config.SubSystemsDynamic as well.
func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config) error {


@@ -26,7 +26,7 @@ import (
)
func TestServerConfig(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
objLayer, fsDir, err := prepareFS(ctx)
@@ -56,7 +56,7 @@ func TestServerConfig(t *testing.T) {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
}
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
if err := saveServerConfig(t.Context(), objLayer, globalServerConfig); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}


@@ -25,11 +25,11 @@ import (
"sync/atomic"
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/logger/target/console"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
)


@@ -104,6 +104,22 @@ func (p *scannerMetrics) log(s scannerMetric, paths ...string) func(custom map[s
}
}
// time n scanner actions.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) timeN(s scannerMetric) func(n int) func() {
startTime := time.Now()
return func(n int) func() {
return func() {
duration := time.Since(startTime)
atomic.AddUint64(&p.operations[s], uint64(n))
if s < scannerMetricLastRealtime {
p.latency[s].add(duration)
}
}
}
}
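A usage sketch against the definition as printed (hypothetical caller): the clock starts when timeN is invoked, and calling both returned closures records the batch size and the elapsed latency in one shot.

done := p.timeN(scannerMetricILM) // clock starts here
// ... evaluate a batch of n object versions ...
done(n)() // add n to the op counter and record the latency sample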
// time a scanner action.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) time(s scannerMetric) func() {


@@ -37,9 +37,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/heal"
"github.com/minio/minio/internal/event"
@@ -664,6 +662,12 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
into.addChild(h)
continue
}
// Adjust the probability of healing.
// This divides the lowest factor x out of the mod check, making each check x times more likely to fire.
// So if duudc (dataUsageUpdateDirCycles) = 10 and we want a heal check every 50 cycles, we check
// whether (cycle/10) % (50/10) == 0, which still runs heal checks once every 50 cycles
// given that objects are pre-selected at 1:10.
folder.objectHealProbDiv = dataUsageUpdateDirCycles
}
f.updateCurrentPath(folder.name)
stopFn := globalScannerMetrics.log(scannerMetricScanFolder, f.root, folder.name)
@@ -858,8 +862,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}
}
}
}
if compact {
stop := globalScannerMetrics.log(scannerMetricCompactFolder, folder.name)
f.newCache.deleteRecursive(thisHash)
@@ -873,7 +877,6 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}
stop(total)
}
}
// Compact if too many children...
if !into.Compacted {
@@ -946,10 +949,7 @@ func (i *scannerItem) transformMetaDir() {
i.objectName = split[len(split)-1]
}
var (
applyActionsLogPrefix = color.Green("applyActions:")
applyVersionActionsLogPrefix = color.Green("applyVersionActions:")
)
var applyActionsLogPrefix = color.Green("applyActions:")
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) {
if i.debug {
@@ -974,153 +974,8 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
return 0
}
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) {
size, err := oi.GetActualSize()
if i.debug {
scannerLogIf(ctx, err)
}
if i.lifeCycle == nil {
return action, size
}
versionID := oi.VersionID
var vc *versioning.Versioning
var lr objectlock.Retention
var rcfg *replication.Config
if !isMinioMetaBucketName(i.bucket) {
vc, err = globalBucketVersioningSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
// Check if bucket is object locked.
lr, err = globalBucketObjectLockSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
rcfg, err = getReplicationConfig(ctx, i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
}
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, lr, rcfg, oi)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)
} else {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q Initial scan: %v\n", i.objectPath(), lcEvt.Action)
}
}
switch lcEvt.Action {
// This version doesn't contribute towards sizeS only when it is permanently deleted.
// This can happen when,
// - ExpireObjectAllVersions flag is enabled
// - NoncurrentVersionExpiration is applicable
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
size = 0
case lifecycle.DeleteAction:
// On a non-versioned bucket, DeleteObject removes the only version permanently.
if !vc.PrefixEnabled(oi.Name) {
size = 0
}
}
applyLifecycleAction(lcEvt, lcEventSrc_Scanner, oi)
return lcEvt.Action, size
}
// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured.
// Note: This function doesn't update sizeSummary since it always removes versions that it doesn't return.
func (i *scannerItem) applyNewerNoncurrentVersionLimit(ctx context.Context, _ ObjectLayer, fivs []FileInfo, expState *expiryState) ([]ObjectInfo, error) {
done := globalScannerMetrics.time(scannerMetricApplyNonCurrent)
defer done()
rcfg, _ := globalBucketObjectLockSys.Get(i.bucket)
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
versioned := vcfg != nil && vcfg.Versioned(i.objectPath())
objectInfos := make([]ObjectInfo, 0, len(fivs))
if i.lifeCycle == nil {
for _, fi := range fivs {
objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned))
}
return objectInfos, nil
}
event := i.lifeCycle.NoncurrentVersionsExpirationLimit(lifecycle.ObjectOpts{Name: i.objectPath()})
lim := event.NewerNoncurrentVersions
if lim == 0 || len(fivs) <= lim+1 { // fewer than lim _noncurrent_ versions
for _, fi := range fivs {
objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned))
}
return objectInfos, nil
}
overflowVersions := fivs[lim+1:]
// Retain the current version + most recent lim noncurrent versions
for _, fi := range fivs[:lim+1] {
objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned))
}
toDel := make([]ObjectToDelete, 0, len(overflowVersions))
for _, fi := range overflowVersions {
obj := fi.ToObjectInfo(i.bucket, i.objectPath(), versioned)
// skip versions with object locking enabled
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
if i.debug {
if obj.VersionID != "" {
console.Debugf(applyVersionActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID)
} else {
console.Debugf(applyVersionActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name)
}
}
// add this version back to remaining versions for
// subsequent lifecycle policy applications
objectInfos = append(objectInfos, obj)
continue
}
// NoncurrentDays not passed yet.
if time.Now().UTC().Before(lifecycle.ExpectedExpiryTime(obj.SuccessorModTime, event.NoncurrentDays)) {
// add this version back to remaining versions for
// subsequent lifecycle policy applications
objectInfos = append(objectInfos, obj)
continue
}
toDel = append(toDel, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: obj.Name,
VersionID: obj.VersionID,
},
})
}
if len(toDel) > 0 {
expState.enqueueByNewerNoncurrent(i.bucket, toDel, event)
}
return objectInfos, nil
}
// applyVersionActions will apply lifecycle checks on all versions of a scanned item. Returns versions that remain
// after applying lifecycle checks configured.
func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fivs []FileInfo, expState *expiryState) ([]ObjectInfo, error) {
objInfos, err := i.applyNewerNoncurrentVersionLimit(ctx, o, fivs, expState)
if err != nil {
return nil, err
}
// Check if we have many versions after applyNewerNoncurrentVersionLimit.
if len(objInfos) >= int(scannerExcessObjectVersions.Load()) {
func (i *scannerItem) alertExcessiveVersions(remainingVersions int, cumulativeSize int64) {
if remainingVersions >= int(scannerExcessObjectVersions.Load()) {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectManyVersions,
@@ -1130,7 +985,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
},
UserAgent: "Scanner",
Host: globalLocalNodeName,
RespElements: map[string]string{"x-minio-versions": strconv.Itoa(len(objInfos))},
RespElements: map[string]string{"x-minio-versions": strconv.Itoa(remainingVersions)},
})
auditLogInternal(context.Background(), AuditLogOptions{
@@ -1139,15 +994,11 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
Bucket: i.bucket,
Object: i.objectPath(),
Tags: map[string]string{
"x-minio-versions": strconv.Itoa(len(objInfos)),
"x-minio-versions": strconv.Itoa(remainingVersions),
},
})
}
cumulativeSize := int64(0)
for _, objInfo := range objInfos {
cumulativeSize += objInfo.Size
}
// Check if the cumulative size of all versions of this object is high.
if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() {
// Notify object accessed via a GET request.
@@ -1160,7 +1011,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
UserAgent: "Scanner",
Host: globalLocalNodeName,
RespElements: map[string]string{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-count": strconv.Itoa(remainingVersions),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
@@ -1171,43 +1022,33 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
Bucket: i.bucket,
Object: i.objectPath(),
Tags: map[string]string{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-count": strconv.Itoa(remainingVersions),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
}
return objInfos, nil
}
type actionsAccountingFn func(oi ObjectInfo, sz, actualSz int64, sizeS *sizeSummary)
// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) (objDeleted bool, size int64) {
done := globalScannerMetrics.time(scannerMetricILM)
var action lifecycle.Action
action, size = i.applyLifecycle(ctx, o, oi)
done()
// Note: objDeleted is true if and only if action ==
// lifecycle.DeleteAllVersionsAction
if action.DeleteAll() {
return true, 0
func (i *scannerItem) applyActions(ctx context.Context, objAPI ObjectLayer, objInfos []ObjectInfo, lr lock.Retention, sizeS *sizeSummary, fn actionsAccountingFn) {
if len(objInfos) == 0 {
return
}
// For instance, an applied lifecycle action means we removed or transitioned an object
// from the current deployment, so we don't have to call the healing
// routine even if asked to via the heal flag.
if action == lifecycle.NoneAction {
healActions := func(oi ObjectInfo, actualSz int64) int64 {
size := actualSz
if i.heal.enabled {
done := globalScannerMetrics.time(scannerMetricHealCheck)
size = i.applyHealing(ctx, o, oi)
size = i.applyHealing(ctx, objAPI, oi)
done()
if healDeleteDangling {
done := globalScannerMetrics.time(scannerMetricCleanAbandoned)
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
err := objAPI.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done()
if err != nil {
healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
@@ -1217,10 +1058,109 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
// replicate only if lifecycle rules are not applied.
done := globalScannerMetrics.time(scannerMetricCheckReplication)
i.healReplication(ctx, o, oi.Clone(), sizeS)
i.healReplication(ctx, oi.Clone(), sizeS)
done()
return size
}
return false, size
vc, err := globalBucketVersioningSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
// start ILM check timer
done := globalScannerMetrics.timeN(scannerMetricILM)
if i.lifeCycle == nil { // no ILM configured, apply healing and replication checks
var cumulativeSize int64
for _, oi := range objInfos {
actualSz, err := oi.GetActualSize()
if err != nil {
scannerLogIf(ctx, err)
continue
}
size := healActions(oi, actualSz)
if fn != nil { // call accountingfn
fn(oi, size, actualSz, sizeS)
}
cumulativeSize += size
}
// end ILM check timer
done(len(objInfos))
i.alertExcessiveVersions(len(objInfos), cumulativeSize)
return
}
objOpts := make([]lifecycle.ObjectOpts, len(objInfos))
for i, oi := range objInfos {
objOpts[i] = oi.ToLifecycleOpts()
}
evaluator := lifecycle.NewEvaluator(*i.lifeCycle).WithLockRetention(&lr).WithReplicationConfig(i.replication.Config)
events, err := evaluator.Eval(objOpts)
if err != nil {
// This error indicates that the objOpts passed to Eval is invalid.
bugLogIf(ctx, err, i.bucket, i.objectPath())
done(len(objInfos)) // end ILM check timer
return
}
done(len(objInfos)) // end ILM check timer
var (
toDel []ObjectToDelete
noncurrentEvents []lifecycle.Event
cumulativeSize int64
)
remainingVersions := len(objInfos)
eventLoop:
for idx, event := range events {
oi := objInfos[idx]
actualSz, err := oi.GetActualSize()
if i.debug {
scannerLogIf(ctx, err)
}
size := actualSz
switch event.Action {
case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
remainingVersions = 0
applyExpiryRule(event, lcEventSrc_Scanner, oi)
break eventLoop
case lifecycle.DeleteAction, lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
if !vc.PrefixEnabled(i.objectPath()) && event.Action == lifecycle.DeleteAction {
remainingVersions--
size = 0
}
applyExpiryRule(event, lcEventSrc_Scanner, oi)
case lifecycle.DeleteVersionAction: // noncurrent versions expiration
opts := objOpts[idx]
remainingVersions--
size = 0
toDel = append(toDel, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: opts.Name,
VersionID: opts.VersionID,
},
})
noncurrentEvents = append(noncurrentEvents, event)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
applyTransitionRule(event, lcEventSrc_Scanner, oi)
case lifecycle.NoneAction:
size = healActions(oi, actualSz)
}
// NB fn must be called for every object version except if it is
// expired or was a dangling object.
if fn != nil {
fn(oi, size, actualSz, sizeS)
}
cumulativeSize += size
}
if len(toDel) > 0 {
globalExpiryState.enqueueNoncurrentVersions(i.bucket, toDel, noncurrentEvents)
}
i.alertExcessiveVersions(remainingVersions, cumulativeSize)
}
func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr lock.Retention, rcfg *replication.Config, obj ObjectInfo) lifecycle.Event {
@@ -1367,22 +1307,8 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
}
// Apply object, object version, restored object or restored object version action on the given object
func applyExpiryRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) bool {
func applyExpiryRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) {
globalExpiryState.enqueueByDays(obj, event, src)
return true
}
// Perform actions (removal or transitioning of objects), return true the action is successfully performed
func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) (success bool) {
switch action := event.Action; action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction,
lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction,
lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
success = applyExpiryRule(event, src, obj)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
success = applyTransitionRule(event, src, obj)
}
return
}
// objectPath returns the prefix and object name.
@@ -1391,7 +1317,7 @@ func (i *scannerItem) objectPath() string {
}
// healReplication will heal a scanned item that has failed replication.
func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
func (i *scannerItem) healReplication(ctx context.Context, oi ObjectInfo, sizeS *sizeSummary) {
if oi.VersionID == "" {
return
}


@@ -18,61 +18,67 @@
package cmd
import (
"context"
"encoding/xml"
"fmt"
"slices"
"strings"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/minio/minio/internal/amztime"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
)
func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
objAPI, disks, err := prepareErasure(context.Background(), 8)
// Prepare object layer
objAPI, disks, err := prepareErasure(t.Context(), 8)
if err != nil {
t.Fatalf("Failed to initialize object layer: %v", err)
}
defer removeRoots(disks)
setObjectLayer(objAPI)
// Prepare bucket metadata
globalBucketMetadataSys = NewBucketMetadataSys()
globalBucketObjectLockSys = &BucketObjectLockSys{}
globalBucketVersioningSys = &BucketVersioningSys{}
es := newExpiryState(context.Background(), objAPI, 0)
workers := []chan expiryOp{make(chan expiryOp)}
es.workers.Store(&workers)
globalExpiryState = es
var wg sync.WaitGroup
wg.Add(1)
expired := make([]ObjectToDelete, 0, 5)
go func() {
defer wg.Done()
workers := globalExpiryState.workers.Load()
for t := range (*workers)[0] {
if t, ok := t.(newerNoncurrentTask); ok {
expired = append(expired, t.versions...)
}
}
}()
lc := lifecycle.Lifecycle{
Rules: []lifecycle.Rule{
{
ID: "max-versions",
Status: "Enabled",
NoncurrentVersionExpiration: lifecycle.NoncurrentVersionExpiration{
NewerNoncurrentVersions: 1,
},
},
},
}
lcXML, err := xml.Marshal(lc)
lcXML := `
<LifecycleConfiguration>
<Rule>
<ID>max-versions</ID>
<Status>Enabled</Status>
<NoncurrentVersionExpiration>
<NewerNoncurrentVersions>2</NewerNoncurrentVersions>
</NoncurrentVersionExpiration>
</Rule>
<Rule>
<ID>delete-all-versions</ID>
<Status>Enabled</Status>
<Filter>
<Tag>
<Key>del-all</Key>
<Value>true</Value>
</Tag>
</Filter>
<Expiration>
<Days>1</Days>
<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
</Expiration>
</Rule>
</LifecycleConfiguration>
`
lc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(lcXML))
if err != nil {
t.Fatalf("Failed to marshal lifecycle config: %v", err)
t.Fatalf("Failed to unmarshal lifecycle config: %v", err)
}
vcfg := versioning.Versioning{
Status: "Enabled",
}
@@ -82,33 +88,45 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
}
bucket := "bucket"
obj := "obj-1"
now := time.Now()
meta := BucketMetadata{
Name: bucket,
Created: now,
LifecycleConfigXML: lcXML,
LifecycleConfigXML: []byte(lcXML),
VersioningConfigXML: vcfgXML,
VersioningConfigUpdatedAt: now,
LifecycleConfigUpdatedAt: now,
lifecycleConfig: &lc,
lifecycleConfig: lc,
versioningConfig: &vcfg,
}
globalBucketMetadataSys.Set(bucket, meta)
item := scannerItem{
Path: obj,
bucket: bucket,
prefix: "",
objectName: obj,
lifeCycle: &lc,
}
// Prepare lifecycle expiration workers
es := newExpiryState(t.Context(), objAPI, 0)
globalExpiryState = es
modTime := time.Now()
// Prepare object versions
obj := "obj-1"
// Simulate objects uploaded 30 hours ago
modTime := now.Add(-48 * time.Hour)
uuids := make([]uuid.UUID, 5)
for i := range uuids {
uuids[i] = uuid.UUID([16]byte{15: uint8(i + 1)})
}
fivs := make([]FileInfo, 5)
objInfos := make([]ObjectInfo, 5)
objRetentionMeta := make(map[string]string)
objRetentionMeta[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objectlock.RetCompliance)
// Set retain until date 12 hours into the future
objRetentionMeta[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = amztime.ISO8601Format(now.Add(12 * time.Hour))
/*
objInfos:
version stack for obj-1
v5 uuid-5 modTime
v4 uuid-4 modTime -1m
v3 uuid-3 modTime -2m
v2 uuid-2 modTime -3m
v1 uuid-1 modTime -4m
*/
for i := 0; i < 5; i++ {
fivs[i] = FileInfo{
Volume: bucket,
@@ -119,41 +137,189 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
Size: 1 << 10,
NumVersions: 5,
}
objInfos[i] = fivs[i].ToObjectInfo(bucket, obj, true)
}
versioned := vcfg.Status == "Enabled"
wants := make([]ObjectInfo, 2)
for i, fi := range fivs[:2] {
wants[i] = fi.ToObjectInfo(bucket, obj, versioned)
/*
lrObjInfos: objInfos with the following modifications
version stack for obj-1
v2 uuid-2 modTime -3m objRetentionMeta
*/
lrObjInfos := slices.Clone(objInfos)
lrObjInfos[3].UserDefined = objRetentionMeta
var lrWants []ObjectInfo
lrWants = append(lrWants, lrObjInfos[:4]...)
/*
replObjInfos: objInfos with the following modifications
version stack for obj-1
v1 uuid-1 modTime -4m "VersionPurgeStatus: replication.VersionPurgePending"
*/
replObjInfos := slices.Clone(objInfos)
replObjInfos[4].VersionPurgeStatus = replication.VersionPurgePending
var replWants []ObjectInfo
replWants = append(replWants, replObjInfos[:3]...)
replWants = append(replWants, replObjInfos[4])
allVersExpObjInfos := slices.Clone(objInfos)
allVersExpObjInfos[0].UserTags = "del-all=true"
replCfg := replication.Config{
Rules: []replication.Rule{
{
ID: "",
Status: "Enabled",
Priority: 1,
Destination: replication.Destination{
ARN: "arn:minio:replication:::dest-bucket",
Bucket: "dest-bucket",
},
},
},
}
gots, err := item.applyNewerNoncurrentVersionLimit(context.TODO(), objAPI, fivs, es)
if err != nil {
t.Fatalf("Failed with err: %v", err)
}
if len(gots) != len(wants) {
t.Fatalf("Expected %d objects but got %d", len(wants), len(gots))
lr := objectlock.Retention{
Mode: objectlock.RetCompliance,
Validity: 12 * time.Hour,
LockEnabled: true,
}
// Close expiry state's channel to inspect object versions enqueued for expiration
close(workers[0])
wg.Wait()
for _, obj := range expired {
switch obj.ObjectV.VersionID {
case uuids[2].String(), uuids[3].String(), uuids[4].String():
default:
t.Errorf("Unexpected versionID being expired: %#v\n", obj)
expiryWorker := func(wg *sync.WaitGroup, readyCh chan<- struct{}, taskCh <-chan expiryOp, gotExpired *[]ObjectToDelete) {
defer wg.Done()
// signal the calling goroutine that the worker is ready to receive tasks
close(readyCh)
var expired []ObjectToDelete
for t := range taskCh {
switch v := t.(type) {
case noncurrentVersionsTask:
expired = append(expired, v.versions...)
case expiryTask:
expired = append(expired, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: v.objInfo.Name,
VersionID: v.objInfo.VersionID,
},
})
}
}
if len(expired) > 0 {
*gotExpired = expired
}
}
tests := []struct {
replCfg replicationConfig
lr objectlock.Retention
objInfos []ObjectInfo
wants []ObjectInfo
wantExpired []ObjectToDelete
}{
{
// With replication configured, version(s) with PENDING purge status
replCfg: replicationConfig{Config: &replCfg},
objInfos: replObjInfos,
wants: replWants,
wantExpired: []ObjectToDelete{
{ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[3].VersionID}},
},
},
{
// With lock retention configured and version(s) with retention metadata
lr: lr,
objInfos: lrObjInfos,
wants: lrWants,
wantExpired: []ObjectToDelete{
{ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[4].VersionID}},
},
},
{
// With replication configured, but no versions with PENDING purge status
replCfg: replicationConfig{Config: &replCfg},
objInfos: objInfos,
wants: objInfos[:3],
wantExpired: []ObjectToDelete{
{ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[3].VersionID}},
{ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[4].VersionID}},
},
},
{
objInfos: allVersExpObjInfos,
wants: nil,
wantExpired: []ObjectToDelete{{ObjectV: ObjectV{ObjectName: obj, VersionID: allVersExpObjInfos[0].VersionID}}},
},
{
// When no versions are present, in practice this could be an object with only free versions
objInfos: nil,
wants: nil,
wantExpired: nil,
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("TestApplyNewerNoncurrentVersionsLimit-%d", i), func(t *testing.T) {
workers := []chan expiryOp{make(chan expiryOp)}
es.workers.Store(&workers)
workerReady := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
var gotExpired []ObjectToDelete
go expiryWorker(&wg, workerReady, workers[0], &gotExpired)
<-workerReady
item := scannerItem{
Path: obj,
bucket: bucket,
prefix: "",
objectName: obj,
lifeCycle: lc,
replication: test.replCfg,
}
var (
sizeS sizeSummary
gots []ObjectInfo
)
item.applyActions(t.Context(), objAPI, test.objInfos, test.lr, &sizeS, func(oi ObjectInfo, sz, _ int64, _ *sizeSummary) {
if sz != 0 {
gots = append(gots, oi)
}
})
if len(gots) != len(test.wants) {
t.Fatalf("Expected %d objects but got %d", len(test.wants), len(gots))
}
if slices.CompareFunc(gots, test.wants, func(g, w ObjectInfo) int {
if g.VersionID == w.VersionID {
return 0
}
return -1
}) != 0 {
t.Fatalf("Expected %v but got %v", test.wants, gots)
}
// verify the objects to be deleted
close(workers[0])
wg.Wait()
if len(gotExpired) != len(test.wantExpired) {
t.Fatalf("Expected expiry of %d objects but got %d", len(test.wantExpired), len(gotExpired))
}
if slices.CompareFunc(gotExpired, test.wantExpired, func(g, w ObjectToDelete) int {
if g.VersionID == w.VersionID {
return 0
}
return -1
}) != 0 {
t.Fatalf("Expected %v but got %v", test.wantExpired, gotExpired)
}
})
}
}
func TestEvalActionFromLifecycle(t *testing.T) {
// Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
numVersions := 4
obj := ObjectInfo{
Name: "foo",
ModTime: time.Now().Add(-31 * 24 * time.Hour),
Size: 100 << 20,
VersionID: uuid.New().String(),
IsLatest: true,
NumVersions: 4,
NumVersions: numVersions,
}
delMarker := ObjectInfo{
Name: "foo-deleted",
@@ -162,8 +328,9 @@ func TestEvalActionFromLifecycle(t *testing.T) {
VersionID: uuid.New().String(),
IsLatest: true,
DeleteMarker: true,
NumVersions: 4,
NumVersions: numVersions,
}
deleteAllILM := `<LifecycleConfiguration>
<Rule>
<Expiration>
@@ -195,35 +362,35 @@ func TestEvalActionFromLifecycle(t *testing.T) {
}
tests := []struct {
ilm lifecycle.Lifecycle
retention lock.Retention
retention *objectlock.Retention
obj ObjectInfo
want lifecycle.Action
}{
{
// with object locking
ilm: *deleteAllLc,
retention: lock.Retention{LockEnabled: true},
retention: &objectlock.Retention{LockEnabled: true},
obj: obj,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *deleteAllLc,
retention: lock.Retention{},
retention: &objectlock.Retention{},
obj: obj,
want: lifecycle.DeleteAllVersionsAction,
},
{
// with object locking
ilm: *delMarkerLc,
retention: lock.Retention{LockEnabled: true},
retention: &objectlock.Retention{LockEnabled: true},
obj: delMarker,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *delMarkerLc,
retention: lock.Retention{},
retention: &objectlock.Retention{},
obj: delMarker,
want: lifecycle.DelMarkerDeleteAllVersionsAction,
},
@@ -231,8 +398,9 @@ func TestEvalActionFromLifecycle(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
if got := evalActionFromLifecycle(context.TODO(), test.ilm, test.retention, nil, test.obj); got.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, got)
gotEvent := evalActionFromLifecycle(t.Context(), test.ilm, *test.retention, nil, test.obj)
if gotEvent.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, gotEvent.Action)
}
})
}


@@ -118,7 +118,6 @@ func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) {
NumObjects: st.NumObjects,
}
}
return
}
// tierStats holds per-tier stats of a remote tier.


@@ -70,7 +70,7 @@ func TestDataUsageUpdate(t *testing.T) {
})
weSleep := func() bool { return false }
got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}
@@ -180,7 +180,7 @@ func TestDataUsageUpdate(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep)
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -294,7 +294,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil
})
got, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep)
got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}
@@ -429,7 +429,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(context.Background(), nil, &xls, got, getSize, 0, weSleep)
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
t.Fatal(err)
@@ -582,7 +582,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil
})
weSleep := func() bool { return false }
want, err := scanDataFolder(context.Background(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
want, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep)
if err != nil {
t.Fatal(err)
}


@@ -165,7 +165,7 @@ func TestCmpReaders(t *testing.T) {
r1 := bytes.NewReader([]byte("abc"))
r2 := bytes.NewReader([]byte("abc"))
ok, msg := cmpReaders(r1, r2)
if !(ok && msg == "") {
if !ok || msg != "" {
t.Fatalf("unexpected")
}
}


@@ -98,7 +98,6 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) {
// We leak entries while we copy
if entries == dynamicTimeoutLogSize {
// Make copy on stack in order to call adjust()
logCopy := [dynamicTimeoutLogSize]time.Duration{}
copy(logCopy[:], dt.log[:])


@@ -167,7 +167,6 @@ func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() fl
const successTimeout = 20 * time.Second
for i := 0; i < dynamicTimeoutLogSize; i++ {
rnd := f()
duration := time.Duration(float64(successTimeout) * rnd)


@@ -293,7 +293,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return err
}
sealedKey = objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, newKey, sealedKey)
return nil
case crypto.S3KMS:
if GlobalKMS == nil {
@@ -333,7 +333,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
}
sealedKey := objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
crypto.S3KMS.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey, cryptoCtx)
crypto.S3KMS.CreateMetadata(metadata, newKey, sealedKey, cryptoCtx)
return nil
case crypto.SSEC:
sealedKey, err := crypto.SSEC.ParseMetadata(metadata)
@@ -347,8 +347,8 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return errInvalidSSEParameters // AWS returns special error for equal but invalid keys.
}
return crypto.ErrInvalidCustomerKey // To provide strict AWS S3 compatibility we return: access denied.
}
if subtle.ConstantTimeCompare(oldKey, newKey) == 1 && sealedKey.Algorithm == crypto.SealAlgorithm {
return nil // don't rotate on equal keys if seal algorithm is latest
}
@@ -376,7 +376,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, key, sealedKey)
return objectKey, nil
case crypto.S3KMS:
if GlobalKMS == nil {
@@ -409,7 +409,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
crypto.S3KMS.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey, cryptoCtx)
crypto.S3KMS.CreateMetadata(metadata, key, sealedKey, cryptoCtx)
return objectKey, nil
case crypto.SSEC:
objectKey := crypto.GenerateKey(key, rand.Reader)
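The rotateKey and newEncryptMetadata hunks above all make the same change: crypto.S3.CreateMetadata and crypto.S3KMS.CreateMetadata now receive the whole KMS data-encryption key rather than the (KeyID, Ciphertext) pair, so any key-version information the KMS returned travels into the object metadata instead of being dropped at each call site. Below is a sketch of the shape of that API; the DEK struct, its field names, and the metadata keys are illustrative assumptions, not the exact minio/kms-go or MinIO definitions:

package main

import "fmt"

// DEK sketches a KMS data-encryption key; older objects carry no version.
type DEK struct {
	KeyID      string
	Version    string // empty for objects encrypted before key versions existed
	Plaintext  []byte
	Ciphertext []byte
}

// Illustrative metadata keys; MinIO's internal key names differ.
const (
	metaKeyID      = "x-kms-key-id"
	metaKeyVersion = "x-kms-key-version"
	metaSealedKey  = "x-kms-sealed-key"
)

// createMetadata mirrors the new call shape: passing the DEK keeps the
// key ID and its version together, so they cannot drift apart.
func createMetadata(metadata map[string]string, key DEK, sealedKey []byte) {
	metadata[metaKeyID] = key.KeyID
	if key.Version != "" {
		metadata[metaKeyVersion] = key.Version
	}
	metadata[metaSealedKey] = string(sealedKey)
}

func main() {
	m := map[string]string{}
	createMetadata(m, DEK{KeyID: "ring/master", Version: "v2"}, []byte("sealed"))
	fmt.Println(m)
}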
@@ -1015,7 +1015,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
if encrypted {
if crypto.SSEC.IsEncrypted(info.UserDefined) {
if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) {
if !crypto.SSEC.IsRequested(headers) && !crypto.SSECopy.IsRequested(headers) {
if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" {
return encrypted, errEncryptedObject
}
@@ -1112,7 +1112,6 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
o.Parts[i].Checksums = cs[i]
}
}
return
}
// metadataEncryptFn provides an encryption function for metadata.
@@ -1155,16 +1154,17 @@ func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn
// decryptChecksums will attempt to decode the checksums and return them if set.
// If part > 0 and we have a checksum for that part, that checksum is returned.
func (o *ObjectInfo) decryptChecksums(part int, h http.Header) map[string]string {
// Returns whether the checksum (main part 0) is a multipart checksum.
func (o *ObjectInfo) decryptChecksums(part int, h http.Header) (cs map[string]string, isMP bool) {
data := o.Checksum
if len(data) == 0 {
return nil
return nil, false
}
if part > 0 && !crypto.SSEC.IsEncrypted(o.UserDefined) {
// already decrypted in ToObjectInfo for multipart objects
for _, pi := range o.Parts {
if pi.Number == part {
return pi.Checksums
return pi.Checksums, true
}
}
}
@@ -1174,7 +1174,7 @@ func (o *ObjectInfo) decryptChecksums(part int, h http.Header) map[string]string
if err != crypto.ErrSecretKeyMismatch {
encLogIf(GlobalContext, err)
}
return nil
return nil, part > 0
}
data = decrypted
}
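decryptChecksums growing a second return value is worth a note: the boolean reports whether the checksum is a multipart checksum, and it stays meaningful on the failure path (return nil, part > 0), so callers can still shape part-style responses when decryption fails for a specific part. A standalone sketch of that pattern, with a hypothetical unseal placeholder standing in for the real metadata decryption:

package main

import (
	"errors"
	"fmt"
)

var errKeyMismatch = errors.New("secret key mismatch")

// unseal is an assumption-only placeholder for metadata decryption.
func unseal(data []byte) (map[string]string, error) {
	if string(data) == "bad" {
		return nil, errKeyMismatch
	}
	return map[string]string{"CRC32C": string(data)}, nil
}

// decryptChecksum is a hypothetical stand-in for the method above: even
// when unsealing fails, the boolean still records whether the caller
// addressed an individual part of a multipart object.
func decryptChecksum(data []byte, part int) (map[string]string, bool) {
	if len(data) == 0 {
		return nil, false
	}
	cs, err := unseal(data)
	if err != nil {
		return nil, part > 0
	}
	return cs, part > 0
}

func main() {
	cs, isMP := decryptChecksum([]byte("bad"), 3)
	fmt.Println(cs, isMP) // map[] true — failure still reports multipart intent
}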

View File

@@ -362,7 +362,6 @@ func TestGetDecryptedRange(t *testing.T) {
t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps)
}
}
}
// Multipart object tests
@@ -538,7 +537,6 @@ func TestGetDecryptedRange(t *testing.T) {
i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef)
}
}
}
}

View File

@@ -138,6 +138,17 @@ func (endpoint *Endpoint) SetDiskIndex(i int) {
endpoint.DiskIdx = i
}
func isValidURLEndpoint(u *url.URL) bool {
// URL style of endpoint.
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All fields should be empty except Host and Path.
isURLOk := (u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && !u.ForceQuery &&
u.RawQuery == "" && u.Fragment == ""
return isURLOk
}
// NewEndpoint - returns new endpoint based on given arguments.
func NewEndpoint(arg string) (ep Endpoint, e error) {
// isEmptyPath - check whether given path is not empty.
@@ -157,8 +168,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All fields should be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint(u) {
return ep, fmt.Errorf("invalid URL endpoint format")
}
@@ -213,7 +223,6 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
u.Path = u.Path[1:]
}
}
} else {
// Only check if the arg is an ip address and ask for scheme since its absent.
// localhost, example.com, any FQDN cannot be disambiguated from a regular file path such as
@@ -605,11 +614,8 @@ func (endpoints Endpoints) UpdateIsLocal() error {
startTime := time.Now()
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
for {
for !foundLocal && (epsResolved != len(endpoints)) {
// Break if the local endpoint is found already Or all the endpoints are resolved.
if foundLocal || (epsResolved == len(endpoints)) {
break
}
// Retry infinitely on Kubernetes and Docker swarm.
// This is needed as the remote hosts are sometime
@@ -795,11 +801,8 @@ func (p PoolEndpointList) UpdateIsLocal() error {
startTime := time.Now()
keepAliveTicker := time.NewTicker(1 * time.Second)
defer keepAliveTicker.Stop()
for {
for !foundLocal && (epsResolved != epCount) {
// Break if the local endpoint is found already Or all the endpoints are resolved.
if foundLocal || (epsResolved == epCount) {
break
}
// Retry infinitely on Kubernetes and Docker swarm.
// This is needed as the remote hosts are sometime
@@ -1194,7 +1197,7 @@ func GetProxyEndpointLocalIndex(proxyEps []ProxyEndpoint) int {
}
// GetProxyEndpoints - get all endpoints that can be used to proxy list request.
func GetProxyEndpoints(endpointServerPools EndpointServerPools) []ProxyEndpoint {
func GetProxyEndpoints(endpointServerPools EndpointServerPools, transport http.RoundTripper) []ProxyEndpoint {
var proxyEps []ProxyEndpoint
proxyEpSet := set.NewStringSet()
@@ -1213,7 +1216,7 @@ func GetProxyEndpoints(endpointServerPools EndpointServerPools) []ProxyEndpoint
proxyEps = append(proxyEps, ProxyEndpoint{
Endpoint: endpoint,
Transport: globalRemoteTargetTransport,
Transport: transport,
})
}
}
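Three independent cleanups in this file: the URL-shape rule is extracted into isValidURLEndpoint so it is stated once instead of inlined; the endpoint-resolution loops fold their exit condition into the for statement; and GetProxyEndpoints receives its http.RoundTripper as a parameter rather than reading the package-level globalRemoteTargetTransport, which makes the dependency explicit and the function testable. The extracted validator works standalone:

package main

import (
	"fmt"
	"net/url"
)

// isValidURLEndpoint mirrors the helper above: scheme must be http or
// https, and every field except Host and Path must be empty.
func isValidURLEndpoint(u *url.URL) bool {
	return (u.Scheme == "http" || u.Scheme == "https") &&
		u.User == nil && u.Opaque == "" && !u.ForceQuery &&
		u.RawQuery == "" && u.Fragment == ""
}

func main() {
	for _, s := range []string{
		"http://server1:9000/data",       // valid
		"https://user@server1:9000/data", // user info -> invalid
		"http://server1:9000/data?x=1",   // query -> invalid
	} {
		u, err := url.Parse(s)
		if err != nil {
			fmt.Println(s, "parse error:", err)
			continue
		}
		fmt.Println(s, isValidURLEndpoint(u))
	}
}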

View File

@@ -201,7 +201,6 @@ func erasureSelfTest() {
ok = false
continue
}
}
}
if !ok {

View File

@@ -89,7 +89,7 @@ func TestErasureDecode(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -108,7 +108,7 @@ func TestErasureDecode(t *testing.T) {
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatalf("Test %d: failed to create erasure test file: %v", i, err)
@@ -134,7 +134,7 @@ func TestErasureDecode(t *testing.T) {
}
writer := bytes.NewBuffer(nil)
_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
_, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -177,7 +177,7 @@ func TestErasureDecode(t *testing.T) {
bitrotReaders[0] = nil
}
writer.Reset()
_, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
_, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil)
closeBitrotReaders(bitrotReaders)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -211,7 +211,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
return
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize)
erasure, err := NewErasure(t.Context(), dataBlocks, parityBlocks, blockSize)
if err != nil {
t.Fatalf("failed to create ErasureStorage: %v", err)
}
@@ -236,7 +236,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
// Create a test file to read from.
buffer := make([]byte, blockSize, 2*blockSize)
n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatal(err)
@@ -266,7 +266,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
tillOffset := erasure.ShardFileOffset(offset, readLen, length)
bitrotReaders[index] = newStreamingBitrotReader(disk, nil, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
}
_, err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil)
_, err = erasure.Decode(t.Context(), buf, bitrotReaders, offset, readLen, length, nil)
closeBitrotReaders(bitrotReaders)
if err != nil {
t.Fatal(err, offset, readLen)

View File

@@ -88,7 +88,7 @@ func TestErasureEncode(t *testing.T) {
t.Fatalf("Test %d: failed to create test setup: %v", i, err)
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -105,7 +105,7 @@ func TestErasureEncode(t *testing.T) {
}
writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
}
n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
n, err := erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFail {
t.Errorf("Test %d: should pass but failed with: %v", i, err)
@@ -140,7 +140,7 @@ func TestErasureEncode(t *testing.T) {
if test.offDisks > 0 {
writers[0] = nil
}
n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
n, err = erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil && !test.shouldFailQuorum {
t.Errorf("Test %d: should pass but failed with: %v", i, err)

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"os"
@@ -75,7 +74,7 @@ func TestErasureHeal(t *testing.T) {
t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err)
}
disks := setup.disks
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
erasure, err := NewErasure(t.Context(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil {
t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err)
}
@@ -88,7 +87,7 @@ func TestErasureHeal(t *testing.T) {
for i, disk := range disks {
writers[i] = newBitrotWriter(disk, "", "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
}
_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
_, err = erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
closeBitrotWriters(writers)
if err != nil {
t.Fatalf("Test %d: failed to create random test data: %v", i, err)
@@ -132,7 +131,7 @@ func TestErasureHeal(t *testing.T) {
}
// test case setup is complete - now call Heal()
err = erasure.Heal(context.Background(), staleWriters, readers, test.size, nil)
err = erasure.Heal(t.Context(), staleWriters, readers, test.size, nil)
closeBitrotReaders(readers)
closeBitrotWriters(staleWriters)
if err != nil && !test.shouldFail {

View File

@@ -323,12 +323,7 @@ func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, part
}
}
erasureDistributionReliable := true
if inconsistent > len(partsMetadata)/2 {
// If there are too many inconsistent files, then we can't trust erasure.Distribution (most likely
// because of bugs found in CopyObject/PutObjectTags) https://github.com/minio/minio/pull/10772
erasureDistributionReliable = false
}
erasureDistributionReliable := inconsistent <= len(partsMetadata)/2
metaErrs := make([]error, len(errs))
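The hunk above applies the same simplification to a flag: a boolean initialized to true and flipped inside an if collapses into a single expression whose condition is the value, with the explanatory comment about CopyObject/PutObjectTags bugs (PR 10772) left to the linked history. The mechanical shape, as a standalone check:

package main

import "fmt"

func main() {
	inconsistent, drives := 3, 8
	// Old shape: declare, then conditionally flip.
	reliable := true
	if inconsistent > drives/2 {
		reliable = false
	}
	// New shape: the condition is the value.
	reliableNew := inconsistent <= drives/2
	fmt.Println(reliable == reliableNew) // true for every input
}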

View File

@@ -152,7 +152,7 @@ func TestListOnlineDisks(t *testing.T) {
t.Skip()
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
@@ -160,7 +160,7 @@ func TestListOnlineDisks(t *testing.T) {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
type tamperKind int
@@ -276,7 +276,7 @@ func TestListOnlineDisks(t *testing.T) {
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -304,7 +304,6 @@ func TestListOnlineDisks(t *testing.T) {
f.Close()
break
}
}
rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
@@ -329,7 +328,7 @@ func TestListOnlineDisks(t *testing.T) {
// TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisksSmallObjects(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
@@ -337,7 +336,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
type tamperKind int
@@ -457,7 +456,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
// and check if that disk
// appears in outDatedDisks.
tamperedIndex = index
dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -485,7 +484,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
f.Close()
break
}
}
rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
@@ -509,14 +507,14 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
}
func TestDisksWithAllParts(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, disks, err := prepareErasure16(ctx)
if err != nil {
t.Fatalf("Prepare Erasure backend failed - %v", err)
}
setObjectLayer(obj)
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(disks)
bucket := "bucket"

View File

@@ -55,10 +55,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
return errors.New("listAndHeal: No non-healing drives found")
}
expectedDisks := len(disks)/2 + 1
fallbackDisks := disks[expectedDisks:]
disks = disks[:expectedDisks]
// How to resolve partial results.
resolver := metadataResolutionParams{
dirQuorum: 1,
@@ -75,7 +71,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
lopts := listPathRawOptions{
disks: disks,
fallbackDisks: fallbackDisks,
bucket: bucket,
path: path,
filterPrefix: filterPrefix,
@@ -584,7 +579,6 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
readers[i] = newBitrotReader(disk, copyPartsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo,
checksumInfo.Hash, erasure.ShardSize())
prefer[i] = disk.Hostname() == ""
}
writers := make([]io.Writer, len(outDatedDisks))
for i, disk := range outDatedDisks {
@@ -609,7 +603,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
// later to the final location.
err = erasure.Heal(ctx, writers, readers, partSize, prefer)
closeBitrotReaders(readers)
closeBitrotWriters(writers)
closeErrs := closeBitrotWriters(writers)
if err != nil {
return result, err
}
@@ -629,6 +623,13 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
continue
}
// A non-nil stale disk which got error on Close()
if closeErrs[i] != nil {
outDatedDisks[i] = nil
disksToHealCount--
continue
}
partsMetadata[i].DataDir = dstDataDir
partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx, partChecksums)
if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
@@ -643,9 +644,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
if disksToHealCount == 0 {
return result, fmt.Errorf("all drives had write errors, unable to heal %s/%s", bucket, object)
}
}
}
defer er.deleteAll(context.Background(), minioMetaTmpBucket, tmpID)
@@ -939,12 +938,12 @@ func isObjectDirDangling(errs []error) (ok bool) {
var foundNotEmpty int
var otherFound int
for _, readErr := range errs {
switch {
case readErr == nil:
switch readErr {
case nil:
found++
case readErr == errFileNotFound || readErr == errVolumeNotFound:
case errFileNotFound, errVolumeNotFound:
notFound++
case readErr == errVolumeNotEmpty:
case errVolumeNotEmpty:
foundNotEmpty++
default:
otherFound++
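Two healing fixes sit in this fragment. First, closeBitrotWriters now returns one error per writer, and healObject drops any stale drive whose writer failed on Close() from the outdated set (decrementing disksToHealCount) instead of recording it as healed, since a failed Close means the shard never became durable. Second, isObjectDirDangling switches on the error value itself, letting sentinel errors share a case. A standalone sketch of the close-error handling; the writer types here are illustrative:

package main

import (
	"errors"
	"fmt"
	"io"
)

// closeAll closes every non-nil writer and reports each result,
// mirroring the new closeBitrotWriters contract.
func closeAll(ws []io.WriteCloser) []error {
	errs := make([]error, len(ws))
	for i, w := range ws {
		if w != nil {
			errs[i] = w.Close()
		}
	}
	return errs
}

type okWriter struct{}

func (okWriter) Write(p []byte) (int, error) { return len(p), nil }
func (okWriter) Close() error                { return nil }

type badWriter struct{ okWriter }

func (badWriter) Close() error { return errors.New("flush failed") }

func main() {
	writers := []io.WriteCloser{okWriter{}, badWriter{}}
	toHeal := len(writers)
	for i, err := range closeAll(writers) {
		if err != nil {
			writers[i] = nil // failed Close: treat the drive as not healed
			toHeal--
		}
	}
	fmt.Println("drives healed:", toHeal) // 1
}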

View File

@@ -311,14 +311,14 @@ func TestIsObjectDangling(t *testing.T) {
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
@@ -353,7 +353,7 @@ func TestHealing(t *testing.T) {
}
disk := er.getDisks()[0]
fileInfoPreHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -376,7 +376,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -395,7 +395,7 @@ func TestHealing(t *testing.T) {
// gone down when an object was replaced by a new object.
fileInfoOutDated := fileInfoPreHeal
fileInfoOutDated.ModTime = time.Now()
err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated)
if err != nil {
t.Fatal(err)
}
@@ -405,7 +405,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -457,7 +457,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = er.getDisks()[0].StatVol(context.Background(), bucket)
_, err = er.getDisks()[0].StatVol(t.Context(), bucket)
if err != nil {
t.Fatal(err)
}
@@ -465,14 +465,14 @@ func TestHealing(t *testing.T) {
// Tests both object and bucket healing.
func TestHealingVersioned(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
// initialize the server and obtain the credentials and root.
// credentials are necessary to sign the HTTP request.
@@ -513,11 +513,11 @@ func TestHealingVersioned(t *testing.T) {
}
disk := er.getDisks()[0]
fileInfoPreHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
fileInfoPreHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -540,11 +540,11 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
fileInfoPostHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -566,7 +566,7 @@ func TestHealingVersioned(t *testing.T) {
// gone down when an object was replaced by a new object.
fileInfoOutDated := fileInfoPreHeal1
fileInfoOutDated.ModTime = time.Now()
err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated)
if err != nil {
t.Fatal(err)
}
@@ -576,7 +576,7 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal1, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal1, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -586,7 +586,7 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal("HealObject failed")
}
fileInfoPostHeal2, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal2, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -638,14 +638,14 @@ func TestHealingVersioned(t *testing.T) {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = er.getDisks()[0].StatVol(context.Background(), bucket)
_, err = er.getDisks()[0].StatVol(t.Context(), bucket)
if err != nil {
t.Fatal(err)
}
}
func TestHealingDanglingObject(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -724,7 +724,7 @@ func TestHealingDanglingObject(t *testing.T) {
// Restore...
setDisks(orgDisks[:4]...)
fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -741,7 +741,7 @@ func TestHealingDanglingObject(t *testing.T) {
t.Fatal(err)
}
fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -771,7 +771,7 @@ func TestHealingDanglingObject(t *testing.T) {
setDisks(orgDisks[:4]...)
disk := getDisk(0)
fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -789,7 +789,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
disk = getDisk(0)
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -820,7 +820,7 @@ func TestHealingDanglingObject(t *testing.T) {
setDisks(orgDisks[:4]...)
disk = getDisk(0)
fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -838,7 +838,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
disk = getDisk(0)
fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
if err != nil {
t.Fatal(err)
}
@@ -849,7 +849,7 @@ func TestHealingDanglingObject(t *testing.T) {
}
func TestHealCorrectQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -933,7 +933,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -960,7 +960,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
for i := 0; i < nfi.Erasure.ParityBlocks; i++ {
erasureDisks[i].Delete(context.Background(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -980,7 +980,7 @@ func TestHealCorrectQuorum(t *testing.T) {
}
func TestHealObjectCorruptedPools(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1044,7 +1044,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
er := set.sets[0]
erasureDisks := er.getDisks()
firstDisk := erasureDisks[0]
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1063,11 +1063,11 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1075,7 +1075,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Errorf("Failure during deleting part.1 - %v", err)
}
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{})
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1095,7 +1095,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1104,7 +1104,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata)
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1127,7 +1127,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
// Test 4: checks if HealObject returns an error when xl.meta is not found
// in more than read quorum number of disks, to create a corrupted situation.
for i := 0; i <= nfi.Erasure.DataBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1148,7 +1148,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ {
stats, _ := erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false)
stats, _ := erasureDisks[i].StatInfoFile(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), false)
if len(stats) != 0 {
t.Errorf("Expected xl.meta file to be not present, but succeeded")
}
@@ -1156,7 +1156,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
}
func TestHealObjectCorruptedXLMeta(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1222,7 +1222,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1235,7 +1235,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
@@ -1250,7 +1250,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
}
// Test 2: Test with a corrupted xl.meta
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd"))
if err != nil {
t.Errorf("Failure during creating part.1 - %v", err)
}
@@ -1273,7 +1273,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
// Test 3: checks if HealObject returns an error when xl.meta is not found
// in more than read quorum number of disks, to create a corrupted situation.
for i := 0; i <= nfi2.Erasure.DataBlocks; i++ {
erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1295,7 +1295,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
}
func TestHealObjectCorruptedParts(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
resetGlobalHealState()
@@ -1362,18 +1362,18 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
part1Disk1Origin, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Disk1Origin, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
part1Disk2Origin, err := secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Disk2Origin, err := secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
// Test 1, remove part.1
err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
Recursive: false,
Immediate: false,
})
@@ -1386,7 +1386,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
part1Replaced, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Replaced, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1396,7 +1396,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
}
// Test 2, Corrupt part.1
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
if err != nil {
t.Fatalf("Failed to write a file - %v", err)
}
@@ -1406,7 +1406,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
part1Replaced, err = firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
part1Replaced, err = firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1416,12 +1416,12 @@ func TestHealObjectCorruptedParts(t *testing.T) {
}
// Test 3, Corrupt one part and remove data in another disk
err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes"))
if err != nil {
t.Fatalf("Failed to write a file - %v", err)
}
err = secondDisk.Delete(context.Background(), bucket, object, DeleteOptions{
err = secondDisk.Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1434,7 +1434,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
partReconstructed, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
partReconstructed, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1443,7 +1443,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
t.Fatalf("part.1 not healed correctly")
}
partReconstructed, err = secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"))
partReconstructed, err = secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil {
t.Fatalf("Failed to read a file - %v", err)
}
@@ -1455,7 +1455,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
// Tests healing of object.
func TestHealObjectErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -1512,7 +1512,7 @@ func TestHealObjectErasure(t *testing.T) {
}
// Delete the whole object folder
err = firstDisk.Delete(context.Background(), bucket, object, DeleteOptions{
err = firstDisk.Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1525,7 +1525,7 @@ func TestHealObjectErasure(t *testing.T) {
t.Fatalf("Failed to heal object - %v", err)
}
if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil {
t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
}
@@ -1534,7 +1534,7 @@ func TestHealObjectErasure(t *testing.T) {
er.getDisks = func() []StorageAPI {
// Nil more than half the disks, to remove write quorum.
for i := 0; i <= len(erasureDisks)/2; i++ {
err := erasureDisks[i].Delete(context.Background(), bucket, object, DeleteOptions{
err := erasureDisks[i].Delete(t.Context(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
@@ -1560,7 +1560,7 @@ func TestHealObjectErasure(t *testing.T) {
// Tests healing of empty directories
func TestHealEmptyDirectoryErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -1596,7 +1596,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
z := obj.(*erasureServerPools)
er := z.serverPools[0].sets[0]
firstDisk := er.getDisks()[0]
err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true)
err = firstDisk.DeleteVol(t.Context(), pathJoin(bucket, encodeDirObject(object)), true)
if err != nil {
t.Fatalf("Failed to delete a file - %v", err)
}
@@ -1608,7 +1608,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
}
// Check if the empty directory is restored in the first disk
_, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, encodeDirObject(object)))
_, err = firstDisk.StatVol(t.Context(), pathJoin(bucket, encodeDirObject(object)))
if err != nil {
t.Fatalf("Expected object to be present but stat failed - %v", err)
}
@@ -1656,7 +1656,7 @@ func TestHealLastDataShard(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16

View File

@@ -99,11 +99,11 @@ func TestReduceErrs(t *testing.T) {
}
// Validates list of all the testcases for returning valid errors.
for i, testCase := range testCases {
gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5)
gotErr := reduceReadQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 5)
if gotErr != testCase.err {
t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr)
}
gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6)
gotNewErr := reduceWriteQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 6)
if gotNewErr != errErasureWriteQuorum {
t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotErr)
}
@@ -148,7 +148,7 @@ func TestHashOrder(t *testing.T) {
}
func TestShuffleDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16
@@ -196,7 +196,7 @@ func testShuffleDisks(t *testing.T, z *erasureServerPools) {
// TestEvalDisks tests the behavior of evalDisks
func TestEvalDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16

View File

@@ -18,7 +18,6 @@
package cmd
import (
"context"
"fmt"
"slices"
"strconv"
@@ -146,7 +145,7 @@ func TestObjectToPartOffset(t *testing.T) {
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
index, offset, err := fi.ObjectToPartOffset(t.Context(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
@@ -272,7 +271,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
fi, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, "", test.expectedQuorum)
fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
_, ok1 := err.(InsufficientReadQuorum)
_, ok2 := test.expectedErr.(InsufficientReadQuorum)
if ok1 != ok2 {

View File

@@ -1,4 +1,4 @@
// Copyright (c) 2015-2023 MinIO, Inc.
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -527,7 +527,7 @@ func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object
}
// renamePart - renames multipart part to its relevant location under uploadID.
func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, optsMeta []byte, writeQuorum int) ([]StorageAPI, error) {
func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, optsMeta []byte, writeQuorum int, skipParent string) ([]StorageAPI, error) {
paths := []string{
dstEntry,
dstEntry + ".meta",
@@ -545,7 +545,7 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].RenamePart(ctx, srcBucket, srcEntry, dstBucket, dstEntry, optsMeta)
return disks[index].RenamePart(ctx, srcBucket, srcEntry, dstBucket, dstEntry, optsMeta, skipParent)
}, index)
}
@@ -668,10 +668,13 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
}
n, err := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
closeErrs := closeBitrotWriters(writers)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil {
return pi, toObjectErr(closeErr, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
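The PutObjectPart hunk above (and the matching ones in putObject and putMetacacheObject later in this set) adds a second gate after erasure encoding: the per-writer Close() errors are reduced against the write quorum, so an upload whose data was encoded but could not be durably closed on enough drives now fails instead of succeeding silently. A simplified sketch of that reduction; the reducer below is a stand-in for reduceWriteQuorumErrs, not its real implementation:

package main

import (
	"errors"
	"fmt"
)

var errWriteQuorum = errors.New("write quorum not met")

// reduceQuorumErrs: nil must appear at least quorum times,
// otherwise the write as a whole is considered failed.
func reduceQuorumErrs(errs []error, quorum int) error {
	nilCount := 0
	for _, err := range errs {
		if err == nil {
			nilCount++
		}
	}
	if nilCount < quorum {
		return errWriteQuorum
	}
	return nil
}

func main() {
	closeErrs := []error{nil, nil, errors.New("disk full"), nil}
	fmt.Println(reduceQuorumErrs(closeErrs, 3)) // <nil>: 3 of 4 closed cleanly
	fmt.Println(reduceQuorumErrs(closeErrs, 4)) // write quorum not met
}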
@@ -751,8 +754,11 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
ctx = rlkctx.Context()
defer uploadIDRLock.RUnlock(rlkctx)
onlineDisks, err = er.renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, partFI, writeQuorum)
onlineDisks, err = er.renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, partFI, writeQuorum, uploadIDPath)
if err != nil {
if errors.Is(err, errUploadIDNotFound) {
return pi, toObjectErr(errUploadIDNotFound, bucket, object, uploadID)
}
if errors.Is(err, errFileNotFound) {
// An in-quorum errFileNotFound means that client stream
// prematurely closed and we do not find any xl.meta or
@@ -925,7 +931,19 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
}
start := objectPartIndexNums(partNums, partNumberMarker)
if start != -1 {
if partNumberMarker > 0 && start == -1 {
// Marker is not among the part numbers present on the
// server, so we return an empty list.
return result, nil
}
if partNumberMarker > 0 && start != -1 {
if start+1 >= len(partNums) {
// Marker points at the end
// of the list, so we simply return an empty result.
return result, nil
}
partNums = partNums[start+1:]
}
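The marker handling above gives ListObjectParts the S3 pagination contract: a part-number marker that is unknown to the server, or that points at the final part, produces an empty page, and otherwise listing resumes strictly after the marker. The slicing rule in isolation, with slices.Index standing in for objectPartIndexNums:

package main

import (
	"fmt"
	"slices"
)

// pageAfter returns the part numbers strictly after marker: an unknown
// marker or a marker at the end yields an empty page.
func pageAfter(partNums []int, marker int) []int {
	if marker <= 0 {
		return partNums
	}
	start := slices.Index(partNums, marker)
	if start == -1 || start+1 >= len(partNums) {
		return nil
	}
	return partNums[start+1:]
}

func main() {
	parts := []int{1, 2, 3, 5}
	fmt.Println(pageAfter(parts, 0)) // [1 2 3 5]
	fmt.Println(pageAfter(parts, 2)) // [3 5]
	fmt.Println(pageAfter(parts, 5)) // []
	fmt.Println(pageAfter(parts, 4)) // [] — marker not present
}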
@@ -1049,7 +1067,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
PartNumber: partNumbers[pidx],
}.Error(),
}
}
return partInfosInQuorum, nil
}

View File

@@ -27,6 +27,8 @@ import (
"net/http"
"path"
"runtime"
"slices"
"sort"
"strconv"
"strings"
"sync"
@@ -811,8 +813,6 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
PoolIndex: er.poolIndex,
})
}
return
}()
validResp := 0
@@ -1176,11 +1176,15 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
}
n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
closeErrs := closeBitrotWriters(writers)
if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key)
}
if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil {
return ObjectInfo{}, toObjectErr(closeErr, minioMetaBucket, key)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if n < data.Size() {
@@ -1423,11 +1427,15 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
bugLogIf(ctx, err)
}
n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
closeErrs := closeBitrotWriters(writers)
if erasureErr != nil {
return ObjectInfo{}, toObjectErr(erasureErr, bucket, object)
}
if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil {
return ObjectInfo{}, toObjectErr(closeErr, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if n < data.Size() {
@@ -1624,7 +1632,7 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
if !opts.NoAuditLog {
for _, obj := range objects {
auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectV.ObjectName, &er)
auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectName, &er)
}
}
@@ -1706,8 +1714,21 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
}
dedupVersions := make([]FileInfoVersions, 0, len(versionsMap))
for _, version := range versionsMap {
dedupVersions = append(dedupVersions, version)
for _, fivs := range versionsMap {
// Removal of existing versions and adding a delete marker in the same
// request is supported. At the same time, we cannot allow adding
// two delete markers on top of any object. To avoid this situation,
// we sort the deletions so that existing versions are removed first,
// then add at most one delete marker if requested.
sort.SliceStable(fivs.Versions, func(i, j int) bool {
return !fivs.Versions[i].Deleted
})
if idx := slices.IndexFunc(fivs.Versions, func(fi FileInfo) bool {
return fi.Deleted
}); idx > -1 {
fivs.Versions = fivs.Versions[:idx+1]
}
dedupVersions = append(dedupVersions, fivs)
}
// Initialize list of errors.
@@ -1732,12 +1753,6 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
continue
}
for _, v := range dedupVersions[i].Versions {
if err == errFileNotFound || err == errFileVersionNotFound {
if !dobjects[v.Idx].DeleteMarker {
// Not delete marker, if not found, ok.
continue
}
}
delObjErrs[index][v.Idx] = err
}
}
@@ -1757,6 +1772,13 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
}
}
err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex])
if err == nil {
dobjects[objIndex].found = true
} else if isErrVersionNotFound(err) || isErrObjectNotFound(err) {
if !dobjects[objIndex].DeleteMarker {
err = nil
}
}
if objects[objIndex].VersionID != "" {
errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID)
} else {
@@ -2010,7 +2032,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
if opts.VersionPurgeStatus().Empty() && opts.DeleteMarkerReplicationStatus().Empty() {
markDelete = false
}
if opts.VersionPurgeStatus() == Complete {
if opts.VersionPurgeStatus() == replication.VersionPurgeComplete {
markDelete = false
}
// now, since VersionPurgeStatus() is already set, we can let the
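The DeleteObjects hunk above fixes delete-marker stacking: each object's queued deletions are ordered so removals of existing versions run first, and the list is truncated after the first delete marker so at most one marker is ever added per request. A standalone sketch of the ordering; the comparator here is written two-sided so it is a strict ordering for sort.SliceStable:

package main

import (
	"fmt"
	"slices"
	"sort"
)

type fileInfo struct {
	VersionID string
	Deleted   bool // true = delete marker
}

// orderDeletions puts version removals first, then keeps at most one
// delete marker, mirroring the hunk above.
func orderDeletions(versions []fileInfo) []fileInfo {
	sort.SliceStable(versions, func(i, j int) bool {
		return !versions[i].Deleted && versions[j].Deleted
	})
	if idx := slices.IndexFunc(versions, func(fi fileInfo) bool {
		return fi.Deleted
	}); idx > -1 {
		versions = versions[:idx+1]
	}
	return versions
}

func main() {
	for _, fi := range orderDeletions([]fileInfo{
		{Deleted: true},   // requested delete marker
		{VersionID: "v1"}, // existing version
		{Deleted: true},   // second marker request
		{VersionID: "v2"}, // existing version
	}) {
		fmt.Printf("%q deleted=%v\n", fi.VersionID, fi.Deleted)
	}
	// "v1" deleted=false, "v2" deleted=false, "" deleted=true
}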

View File

@@ -36,7 +36,7 @@ import (
)
func TestRepeatPutObjectPart(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
var objLayer ObjectLayer
@@ -50,7 +50,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
}
// cleaning up of temporary test directories
defer objLayer.Shutdown(context.Background())
defer objLayer.Shutdown(t.Context())
defer removeRoots(disks)
err = objLayer.MakeBucket(ctx, "bucket1", MakeBucketOptions{})
@@ -91,7 +91,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
{"bucket", "dir/obj", nil},
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend
@@ -99,7 +99,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer xl.Shutdown(context.Background())
defer xl.Shutdown(t.Context())
err = xl.MakeBucket(ctx, "bucket", MakeBucketOptions{})
if err != nil {
@@ -131,8 +131,77 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
removeRoots(fsDirs)
}
func TestDeleteObjectsVersionedTwoPools(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasurePools()
if err != nil {
t.Fatal("Unable to initialize 'Erasure' object layer.", err)
}
// Remove all dirs.
for _, dir := range fsDirs {
defer os.RemoveAll(dir)
}
bucketName := "bucket"
objectName := "myobject"
err = obj.MakeBucket(ctx, bucketName, MakeBucketOptions{
VersioningEnabled: true,
})
if err != nil {
t.Fatal(err)
}
z, ok := obj.(*erasureServerPools)
if !ok {
t.Fatal("unexpected object layer type")
}
versions := make([]string, 2)
for i := range z.serverPools {
objInfo, err := z.serverPools[i].PutObject(ctx, bucketName, objectName,
mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{
Versioned: true,
})
if err != nil {
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
}
versions[i] = objInfo.VersionID
}
// Remove and check the version in the second pool, then
// remove and check the version in the first pool
for testIdx, vid := range []string{versions[1], versions[0]} {
names := []ObjectToDelete{
{
ObjectV: ObjectV{
ObjectName: objectName,
VersionID: vid,
},
},
}
_, delErrs := obj.DeleteObjects(ctx, bucketName, names, ObjectOptions{
Versioned: true,
})
for i := range delErrs {
if delErrs[i] != nil {
t.Errorf("Test %d: Failed to remove object `%v` with the error: `%v`", testIdx, names[i], delErrs[i])
}
_, statErr := obj.GetObjectInfo(ctx, bucketName, objectName, ObjectOptions{
VersionID: names[i].VersionID,
})
switch statErr.(type) {
case VersionNotFound:
default:
t.Errorf("Test %d: Object %s is not removed", testIdx, objectName)
}
}
}
}
func TestDeleteObjectsVersioned(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasure(ctx, 16)
@@ -177,7 +246,6 @@ func TestDeleteObjectsVersioned(t *testing.T) {
VersionID: objInfo.VersionID,
},
}
}
names = append(names, ObjectToDelete{
ObjectV: ObjectV{
@@ -197,7 +265,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
for i, test := range testCases {
_, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{
VersionID: names[i].ObjectV.VersionID,
VersionID: names[i].VersionID,
})
switch statErr.(type) {
case VersionNotFound:
@@ -212,7 +280,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
}
func TestErasureDeleteObjectsErasureSet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
obj, fsDirs, err := prepareErasureSets32(ctx)
@@ -285,7 +353,7 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -294,7 +362,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -354,7 +422,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -363,7 +431,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -414,7 +482,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
}
func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -423,7 +491,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -485,7 +553,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
}
func TestGetObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -494,7 +562,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -594,7 +662,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
func TestHeadObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -603,7 +671,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
t.Fatal(err)
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -671,7 +739,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
}
func TestPutObjectNoQuorum(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -681,7 +749,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -734,7 +802,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
}
func TestPutObjectNoQuorumSmall(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -744,7 +812,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)
@@ -801,7 +869,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
// Test PutObject twice, one small and another bigger
// than small data threshold and checks reading them again
func TestPutObjectSmallInlineData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
const numberOfDisks = 4
@@ -813,7 +881,7 @@ func TestPutObjectSmallInlineData(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
bucket := "bucket"
@@ -1063,7 +1131,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
// In some deployments, one object has data inlined in one disk and not inlined in other disks.
func TestGetObjectInlineNotInline(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create a backend with 4 disks named disk{1...4}, this name convention
@@ -1083,7 +1151,7 @@ func TestGetObjectInlineNotInline(t *testing.T) {
}
// cleaning up of temporary test directories
defer objLayer.Shutdown(context.Background())
defer objLayer.Shutdown(t.Context())
defer removeRoots(fsDirs)
// Create a testbucket
@@ -1124,7 +1192,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) {
t.Skip()
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
// Create an instance of xl backend.
@@ -1134,7 +1202,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) {
}
// Cleanup backend directories.
defer obj.Shutdown(context.Background())
defer obj.Shutdown(t.Context())
defer removeRoots(fsDirs)
z := obj.(*erasureServerPools)

View File

@@ -1014,18 +1014,14 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
defer wk.Give()
// We will perpetually retry listing if it fails, since we cannot
// possibly give up in this matter
-for {
-if contextCanceled(ctx) {
-break
-}
+for !contextCanceled(ctx) {
err := set.listObjectsToDecommission(ctx, bi,
func(entry metaCacheEntry) {
wk.Take()
go decommissionEntry(entry)
},
)
-if err == nil || errors.Is(err, context.Canceled) {
+if err == nil || errors.Is(err, context.Canceled) || errors.Is(err, errVolumeNotFound) {
break
}
setN := humanize.Ordinal(setIdx + 1)
@@ -1186,29 +1182,32 @@ func (z *erasureServerPools) checkAfterDecom(ctx context.Context, idx int) error
return
}
+// `.usage-cache.bin` still exists, must be not readable ignore it.
+if bi.Name == minioMetaBucket && strings.Contains(entry.name, dataUsageCacheName) {
+// skipping bucket usage cache name, as its autogenerated.
+return
+}
fivs, err := entry.fileInfoVersions(bi.Name)
if err != nil {
return
}
// We need a reversed order for decommissioning,
// to create the appropriate stack.
versionsSorter(fivs.Versions).reverse()
+var ignored int
for _, version := range fivs.Versions {
// Apply lifecycle rules on the objects that are expired.
if filterLifecycle(bi.Name, version.Name, version) {
+ignored++
continue
}
-// `.usage-cache.bin` still exists, must be not readable ignore it.
-if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) {
-// skipping bucket usage cache name, as its autogenerated.
+if version.Deleted {
+ignored++
continue
}
-versionsFound++
}
+versionsFound += len(fivs.Versions) - ignored
}); err != nil {
return err
}
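
Note: the loop above folds the cancellation check into the for condition and additionally stops retrying once listing reports errVolumeNotFound, so a bucket that disappears mid-decommission no longer triggers endless retries. A minimal sketch of the retry shape, where listFn stands in for the internal listObjectsToDecommission call:

    // errVolumeNotFound is MinIO's internal sentinel error, redeclared
    // here only to keep the sketch self-contained.
    var errVolumeNotFound = errors.New("volume not found")

    // retryListing keeps retrying until success, cancellation, or a missing volume.
    func retryListing(ctx context.Context, listFn func(context.Context) error) {
        for ctx.Err() == nil { // same effect as !contextCanceled(ctx)
            err := listFn(ctx)
            if err == nil || errors.Is(err, context.Canceled) || errors.Is(err, errVolumeNotFound) {
                return
            }
            // otherwise log and retry, as the decommission worker does
        }
    }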

View File

@@ -32,9 +32,9 @@ func prepareErasurePools() (ObjectLayer, []string, error) {
pools := mustGetPoolEndpoints(0, fsDirs[:16]...)
pools = append(pools, mustGetPoolEndpoints(1, fsDirs[16:]...)...)
// Everything is fine, should return nil
-objLayer, err := newErasureServerPools(context.Background(), pools)
+objLayer, _, err := initObjectLayer(context.Background(), pools)
if err != nil {
removeRoots(fsDirs)
return nil, nil, err
}
return objLayer, fsDirs, nil

View File

@@ -45,6 +45,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/sync/errgroup"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"github.com/puzpuzpuz/xsync/v3"
)
@@ -1248,89 +1249,42 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
ctx = lkctx.Context()
defer multiDeleteLock.Unlock(lkctx)
-// Fetch location of up to 10 objects concurrently.
-poolObjIdxMap := map[int][]ObjectToDelete{}
-origIndexMap := map[int][]int{}
+dObjectsByPool := make([][]DeletedObject, len(z.serverPools))
+dErrsByPool := make([][]error, len(z.serverPools))
-// Always perform 1/10th of the number of objects per delete
-concurrent := len(objects) / 10
-if concurrent <= 10 {
-// if we cannot get 1/10th then choose the number of
-// objects as concurrent.
-concurrent = len(objects)
-}
-var mu sync.Mutex
-eg := errgroup.WithNErrs(len(objects)).WithConcurrency(concurrent)
-for j, obj := range objects {
-j := j
-obj := obj
+eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools))
+for i, pool := range z.serverPools {
+i := i
+pool := pool
eg.Go(func() error {
-pinfo, _, err := z.getPoolInfoExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{
-NoLock: true,
-})
-if err != nil {
-if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
-derrs[j] = err
-}
-dobjects[j] = DeletedObject{
-ObjectName: decodeDirObject(obj.ObjectName),
-VersionID: obj.VersionID,
-}
-return nil
-}
-// Delete marker already present we are not going to create new delete markers.
-if pinfo.ObjInfo.DeleteMarker && obj.VersionID == "" {
-dobjects[j] = DeletedObject{
-DeleteMarker: pinfo.ObjInfo.DeleteMarker,
-DeleteMarkerVersionID: pinfo.ObjInfo.VersionID,
-DeleteMarkerMTime: DeleteMarkerMTime{pinfo.ObjInfo.ModTime},
-ObjectName: decodeDirObject(pinfo.ObjInfo.Name),
-}
-return nil
-}
-idx := pinfo.Index
-mu.Lock()
-defer mu.Unlock()
-poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
-origIndexMap[idx] = append(origIndexMap[idx], j)
+dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts)
return nil
-}, j)
+}, i)
}
eg.Wait() // wait to check all the pools.
-if len(poolObjIdxMap) > 0 {
-// Delete concurrently in all server pools.
-var wg sync.WaitGroup
-wg.Add(len(z.serverPools))
-for idx, pool := range z.serverPools {
-go func(idx int, pool *erasureSets) {
-defer wg.Done()
-objs := poolObjIdxMap[idx]
-if len(objs) == 0 {
-return
-}
-orgIndexes := origIndexMap[idx]
-deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts)
-mu.Lock()
-for i, derr := range errs {
-if derr != nil {
-derrs[orgIndexes[i]] = derr
-}
-deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName)
-dobjects[orgIndexes[i]] = deletedObjects[i]
-}
-mu.Unlock()
-}(idx, pool)
+for i := range dobjects {
+// Iterate over pools
+for pool := range z.serverPools {
+if dErrsByPool[pool][i] == nil && dObjectsByPool[pool][i].found {
+// A fast exit when the object is found and removed
+dobjects[i] = dObjectsByPool[pool][i]
+derrs[i] = nil
+break
+}
+if derrs[i] == nil {
+// No error related to this object is found, assign this pool result
+// whether it is nil because there is no object found or because of
+// some other errors such erasure quorum errors.
+dobjects[i] = dObjectsByPool[pool][i]
+derrs[i] = dErrsByPool[pool][i]
+}
+}
-wg.Wait()
}
+for i := range dobjects {
+dobjects[i].ObjectName = decodeDirObject(dobjects[i].ObjectName)
+}
return dobjects, derrs
}
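
Note: DeleteObjects now fans the whole batch out to every server pool in parallel and merges the per-object results afterwards, instead of first resolving which pool owns each object. The merge prefers the first pool that actually found and removed the object; otherwise it keeps the first recorded result, error or not. A minimal sketch of that merge rule with simplified stand-in types (the real code uses DeletedObject and its unexported found field):

    type deletedObject struct {
        name  string
        found bool // set when a pool actually removed the object
    }

    func mergePoolResults(byPool [][]deletedObject, errsByPool [][]error, numObjects int) ([]deletedObject, []error) {
        objs := make([]deletedObject, numObjects)
        errs := make([]error, numObjects)
        for i := 0; i < numObjects; i++ {
            for pool := range byPool {
                if errsByPool[pool][i] == nil && byPool[pool][i].found {
                    objs[i], errs[i] = byPool[pool][i], nil
                    break // fast exit: found and removed in this pool
                }
                if errs[i] == nil {
                    objs[i], errs[i] = byPool[pool][i], errsByPool[pool][i]
                }
            }
        }
        return objs, errs
    }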
@@ -2514,7 +2468,7 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
ctx, cancel := context.WithCancel(ctx)
defer cancel()
-var poolErrs [][]error
+poolErrs := make([][]error, len(z.serverPools))
for idx, erasureSet := range z.serverPools {
if opts.Pool != nil && *opts.Pool != idx {
continue
@@ -2523,20 +2477,20 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
continue
}
errs := make([]error, len(erasureSet.sets))
-var wg sync.WaitGroup
+wk, _ := workers.New(3)
for idx, set := range erasureSet.sets {
if opts.Set != nil && *opts.Set != idx {
continue
}
-wg.Add(1)
+wk.Take()
go func(idx int, set *erasureObjects) {
-defer wg.Done()
+defer wk.Give()
errs[idx] = set.listAndHeal(ctx, bucket, prefix, opts.Recursive, opts.ScanMode, healEntry)
}(idx, set)
}
-wg.Wait()
-poolErrs = append(poolErrs, errs)
+wk.Wait()
+poolErrs[idx] = errs
}
for _, errs := range poolErrs {
for _, err := range errs {
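
Note: HealObjects above replaces the unbounded sync.WaitGroup with workers.New(3) from github.com/minio/pkg/v3/workers, capping concurrent per-set listing and healing at three goroutines per pool. Take/Give/Wait behaves like a counting semaphore; a minimal channel-based sketch of the same semantics (assuming sync is imported, and not the library's actual implementation):

    type workerPool struct {
        slots chan struct{}
        wg    sync.WaitGroup
    }

    func newWorkerPool(n int) *workerPool { return &workerPool{slots: make(chan struct{}, n)} }
    func (p *workerPool) Take()           { p.wg.Add(1); p.slots <- struct{}{} } // blocks once n jobs run
    func (p *workerPool) Give()           { <-p.slots; p.wg.Done() }
    func (p *workerPool) Wait()           { p.wg.Wait() }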

View File

@@ -159,7 +159,7 @@ func TestCrcHashMod(t *testing.T) {
// TestNewErasure - tests initialization of all input disks
// and constructs a valid `Erasure` object
func TestNewErasureSets(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
defer cancel()
nDisks := 16 // Maximum disks.

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"crypto/rand"
"io"
"testing"
@@ -52,11 +51,11 @@ func TestErasureEncodeDecode(t *testing.T) {
buffer := make([]byte, len(data), 2*len(data))
copy(buffer, data)
-erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV2)
+erasure, err := NewErasure(t.Context(), test.dataBlocks, test.parityBlocks, blockSizeV2)
if err != nil {
t.Fatalf("Test %d: failed to create erasure: %v", i, err)
}
-encoded, err := erasure.EncodeData(context.Background(), buffer)
+encoded, err := erasure.EncodeData(t.Context(), buffer)
if err != nil {
t.Fatalf("Test %d: failed to encode data: %v", i, err)
}
@@ -69,7 +68,7 @@ func TestErasureEncodeDecode(t *testing.T) {
}
if test.reconstructParity {
-err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded)
+err = erasure.DecodeDataAndParityBlocks(t.Context(), encoded)
} else {
err = erasure.DecodeDataBlocks(encoded)
}
@@ -98,7 +97,7 @@ func TestErasureEncodeDecode(t *testing.T) {
}
decodedData := new(bytes.Buffer)
-if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
+if _, err = writeDataBlocks(t.Context(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil {
t.Errorf("Test %d: failed to write data blocks: %v", i, err)
}
if !bytes.Equal(decodedData.Bytes(), data) {
@@ -127,7 +126,7 @@ func newErasureTestSetup(tb testing.TB, dataBlocks int, parityBlocks int, blockS
if err != nil {
return nil, err
}
-err = disks[i].MakeVol(context.Background(), "testbucket")
+err = disks[i].MakeVol(tb.Context(), "testbucket")
if err != nil {
return nil, err
}

View File

@@ -367,9 +367,10 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
}
return minio.New(driver.endpoint, &minio.Options{
-Creds: mcreds,
-Secure: globalIsTLS,
-Transport: tr,
+Creds: mcreds,
+Secure: globalIsTLS,
+Transport: tr,
+TrailingHeaders: true,
})
}
@@ -381,9 +382,10 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
}
return minio.New(driver.endpoint, &minio.Options{
-Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""),
-Secure: globalIsTLS,
-Transport: tr,
+Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""),
+Secure: globalIsTLS,
+Transport: tr,
+TrailingHeaders: true,
})
}
@@ -552,6 +554,7 @@ func (driver *ftpDriver) PutFile(ctx *ftp.Context, objPath string, data io.Reade
info, err := clnt.PutObject(context.Background(), bucket, object, data, -1, minio.PutObjectOptions{
ContentType: mimedb.TypeByExtension(path.Ext(object)),
DisableContentSha256: true,
+Checksum: minio.ChecksumFullObjectCRC32C,
})
n = info.Size
return n, err

View File

@@ -22,11 +22,11 @@ import (
"net"
"net/http"
"path"
"path/filepath"
"runtime/debug"
"strings"
"sync/atomic"
"time"
"unicode"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -293,12 +293,6 @@ func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) {
return time.Time{}, ErrMissingDateHeader
}
-// Bad path components to be rejected by the path validity handler.
-const (
-dotdotComponent = ".."
-dotComponent = "."
-)
func hasBadHost(host string) error {
if globalIsCICD && strings.TrimSpace(host) == "" {
// under CI/CD test setups ignore empty hosts as invalid hosts
@@ -311,21 +305,41 @@ func hasBadHost(host string) error {
// Check if the incoming path has bad path components,
// such as ".." and "."
func hasBadPathComponent(path string) bool {
-if len(path) > 4096 {
-// path cannot be greater than Linux PATH_MAX
-// this is to avoid a busy loop, that can happen
-// if the caller sends path of following style
-// a/a/a/a/a/a/a/a...
+n := len(path)
+if n > 32<<10 {
+// At 32K we are beyond reasonable.
return true
}
-path = filepath.ToSlash(strings.TrimSpace(path)) // For windows '\' must be converted to '/'
-for _, p := range strings.Split(path, SlashSeparator) {
-switch strings.TrimSpace(p) {
-case dotdotComponent:
+i := 0
+// Skip leading slashes (for sake of Windows \ is included as well)
+for i < n && (path[i] == SlashSeparatorChar || path[i] == '\\') {
+i++
+}
+for i < n {
+// Find the next segment
+start := i
+for i < n && path[i] != SlashSeparatorChar && path[i] != '\\' {
+i++
+}
+// Trim whitespace of segment
+segmentStart, segmentEnd := start, i
+for segmentStart < segmentEnd && unicode.IsSpace(rune(path[segmentStart])) {
+segmentStart++
+}
+for segmentEnd > segmentStart && unicode.IsSpace(rune(path[segmentEnd-1])) {
+segmentEnd--
+}
+// Check for ".." or "."
+switch {
+case segmentEnd-segmentStart == 2 && path[segmentStart] == '.' && path[segmentStart+1] == '.':
return true
-case dotComponent:
+case segmentEnd-segmentStart == 1 && path[segmentStart] == '.':
return true
}
+i++
}
return false
}
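
Note: the rewritten hasBadPathComponent scans the path once, byte by byte, instead of allocating via filepath.ToSlash and strings.Split; it treats both / and \ as separators, still rejects "." and ".." segments after trimming whitespace, and raises the length cap from 4096 bytes to 32 KiB. Illustrative expectations based on the code above:

    hasBadPathComponent("a/b/c")                     // false
    hasBadPathComponent("a/../b")                    // true, ".." segment
    hasBadPathComponent(`a\..\b`)                    // true, backslash separators count too
    hasBadPathComponent("a/ . /b")                   // true, "." after whitespace trimming
    hasBadPathComponent(strings.Repeat("a/", 20000)) // true, beyond the 32K cap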

View File

@@ -22,6 +22,7 @@ import (
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"github.com/minio/minio/internal/crypto"
@@ -184,3 +185,27 @@ func TestSSETLSHandler(t *testing.T) {
}
}
}
+func Benchmark_hasBadPathComponent(t *testing.B) {
+tests := []struct {
+name string
+input string
+want bool
+}{
+{name: "empty", input: "", want: false},
+{name: "backslashes", input: `\a\a\ \\ \\\\\\\`, want: false},
+{name: "long", input: strings.Repeat("a/", 2000), want: false},
+{name: "long-fail", input: strings.Repeat("a/", 2000) + "../..", want: true},
+}
+for _, tt := range tests {
+t.Run(tt.name, func(b *testing.B) {
+b.SetBytes(int64(len(tt.input)))
+b.ReportAllocs()
+for i := 0; i < b.N; i++ {
+if got := hasBadPathComponent(tt.input); got != tt.want {
+t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want)
+}
+}
+})
+}
+}

View File

@@ -352,10 +352,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
disks[i], disks[j] = disks[j], disks[i]
})
-expectedDisks := len(disks)/2 + 1
-fallbackDisks := disks[expectedDisks:]
-disks = disks[:expectedDisks]
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
@@ -518,7 +514,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
err = listPathRaw(ctx, listPathRawOptions{
disks: disks,
-fallbackDisks: fallbackDisks,
bucket: bucket,
recursive: true,
forwardTo: forwardTo,
@@ -539,7 +534,8 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
go healEntry(bucket, *entry)
},
finished: func(errs []error) {
-if countErrs(errs, nil) != len(errs) {
+success := countErrs(errs, nil)
+if success < len(disks)/2+1 {
retErr = fmt.Errorf("one or more errors reported during listing: %v", errors.Join(errs...))
}
},
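
Note: with the earlier hunk no longer splitting disks into a primary half and a fallback half, listPathRaw lists across the full disk set, and the finished callback above reports failure only when fewer than a simple majority of disks (len(disks)/2 + 1) listed cleanly, rather than on any single error. A minimal sketch of the majority rule, with countErrs-style counting inlined:

    // listingQuorumOK reports whether enough disks listed successfully.
    func listingQuorumOK(errs []error, totalDisks int) bool {
        success := 0
        for _, err := range errs {
            if err == nil {
                success++
            }
        }
        return success >= totalDisks/2+1
    }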

Some files were not shown because too many files have changed in this diff Show More