Compare commits

...

255 Commits

Author SHA1 Message Date
Andreas Auernhammer
404d2ebe3f set SSE headers in put-part response (#12008)
This commit fixes a bug in the put-part
implementation. The SSE headers should be
set as specified by AWS - See:
https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html

Now, the MinIO server should set SSE-C headers,
like `x-amz-server-side-encryption-customer-algorithm`.

Fixes #11991
2021-04-07 14:50:28 -07:00
Minio Trusted
46964eb764 Update yaml files to latest version RELEASE.2021-04-06T23-11-00Z 2021-04-06 23:35:33 +00:00
Poorna Krishnamoorthy
bfab990c33 Improve error message from SetRemoteTargetHandler (#11909) 2021-04-06 12:42:30 -07:00
Harshavardhana
94018588fe unmarshal both LegalHold and ObjectLockLegalHold XML types (#11921)
Because of silly AWS S3 behavior we have to handle both types.

fixes #11920
2021-04-06 12:41:56 -07:00
Anis Elleuch
8b76ba8d5d crawling: Apply lifecycle then decide healing action (#11563)
It is inefficient to decide to heal an object before checking its
lifecycle for expiration or transition. This commit will just reverse
the order of actions: evaluate lifecycle first and heal only if asked and
the lifecycle evaluation results in a NoneAction.
2021-04-06 12:41:51 -07:00
Harshavardhana
7eb7f65e48 add policy conditions support for signatureVersion and authType (#11947)
https://docs.aws.amazon.com/AmazonS3/latest/API/bucket-policy-s3-sigv4-conditions.html

fixes #11944
2021-04-06 12:41:31 -07:00
Harshavardhana
c608c0688a fix: properly close leaking bandwidth monitor channel (#11967)
This PR fixes

- close the leaking bandwidth report channel
- remove the closer requirement for the bandwidth monitor;
  instead, if Read() fails, remember the error and return
  the error for all subsequent reads.
- use locking for usage-cache.bin updates, with inline
  data we cannot afford to have concurrent writes to
  usage-cache.bin corrupting xl.meta
2021-04-06 12:40:42 -07:00
Aditya Manthramurthy
41a9d1d778 Fix S3Select SQL column reference handling (#11957)
This change fixes handling of these types of queries:

- Double quoted column names with special characters:
    SELECT "column.name" FROM s3object
- Double quoted column names with reserved keywords:
    SELECT "CAST" FROM s3object
- Table name as prefix for column names:
    SELECT S3Object."CAST" FROM s3object
2021-04-06 12:40:28 -07:00
Klaus Post
e21e80841e Fix data race when connecting disks (#11983)
Multiple disks from the same set would be writing concurrently.

```
WARNING: DATA RACE
Write at 0x00c002100ce0 by goroutine 166:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks.func1()
      d:/minio/minio/cmd/erasure-sets.go:254 +0x82f

Previous write at 0x00c002100ce0 by goroutine 129:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks.func1()
      d:/minio/minio/cmd/erasure-sets.go:254 +0x82f

Goroutine 166 (running) created at:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks()
      d:/minio/minio/cmd/erasure-sets.go:210 +0x324
  github.com/minio/minio/cmd.(*erasureSets).monitorAndConnectEndpoints()
      d:/minio/minio/cmd/erasure-sets.go:288 +0x244

Goroutine 129 (finished) created at:
  github.com/minio/minio/cmd.(*erasureSets).connectDisks()
      d:/minio/minio/cmd/erasure-sets.go:210 +0x324
  github.com/minio/minio/cmd.(*erasureSets).monitorAndConnectEndpoints()
      d:/minio/minio/cmd/erasure-sets.go:288 +0x244
```
2021-04-06 12:39:59 -07:00
Klaus Post
98c792bbeb Fix disk info race (#11984)
Protect updated members in xlStorage.

```
WARNING: DATA RACE
Write at 0x00c004b4ee78 by goroutine 1491:
  github.com/minio/minio/cmd.(*xlStorage).GetDiskID()
      d:/minio/minio/cmd/xl-storage.go:590 +0x1078
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).checkDiskStale()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:195 +0x84
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).StatVol()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:284 +0x16a
  github.com/minio/minio/cmd.erasureObjects.getBucketInfo.func1()
      d:/minio/minio/cmd/erasure-bucket.go:100 +0x1a5
  github.com/minio/minio/pkg/sync/errgroup.(*Group).Go.func1()
      d:/minio/minio/pkg/sync/errgroup/errgroup.go:122 +0xd7

Previous read at 0x00c004b4ee78 by goroutine 1087:
  github.com/minio/minio/cmd.(*xlStorage).CheckFile.func1()
      d:/minio/minio/cmd/xl-storage.go:1699 +0x384
  github.com/minio/minio/cmd.(*xlStorage).CheckFile()
      d:/minio/minio/cmd/xl-storage.go:1726 +0x13c
  github.com/minio/minio/cmd.(*xlStorageDiskIDCheck).CheckFile()
      d:/minio/minio/cmd/xl-storage-disk-id-check.go:446 +0x23b
  github.com/minio/minio/cmd.erasureObjects.parentDirIsObject.func1()
      d:/minio/minio/cmd/erasure-common.go:173 +0x194
  github.com/minio/minio/pkg/sync/errgroup.(*Group).Go.func1()
      d:/minio/minio/pkg/sync/errgroup/errgroup.go:122 +0xd7
```
2021-04-06 12:39:57 -07:00
Klaus Post
f687ba53bc Fix Access Key requests (#11979)
Fix accessing claims when auth error is unchecked.

Only replaced when unchecked and when clearly without side effects.

Fixes #11959
2021-04-06 11:03:55 -07:00
Harshavardhana
e3da59c923 fix possible crash in bucket bandwidth monitor (#11986) 2021-04-06 11:03:41 -07:00
Harshavardhana
781b9b051c fix: service accounts policy enforcement regression (#11910)
service accounts were not inheriting parent policies
anymore due to refactors in the PolicyDBGet() from
the latest release, fix this behavior properly.
2021-04-06 08:58:05 -07:00
Harshavardhana
438becfde8 fix: delete/delete marker replication versions consistent (#11932)
replication didn't work as expected when deletion of
delete markers was requested in DeleteMultipleObjects
API, this is due to incorrect lookup elements being
used to look for delete markers.
2021-04-06 08:57:36 -07:00
Harshavardhana
16ef338649 fix: notify parent user in notification events (#11934)
fixes #11885
2021-04-06 08:55:37 -07:00
Harshavardhana
3242847ec0 avoid network read errors crashing CreateFile call (#11939)
Thanks to @dvaldivia for reproducing this
2021-04-06 08:55:30 -07:00
Harshavardhana
cf87303094 do not call LocalStorageInfo on gateways (#11903)
fixes https://github.com/minio/mc/issues/3665
2021-03-25 15:26:22 -07:00
Harshavardhana
90d8ec6310 fix: reject duplicate keys in PostPolicyJSON document (#11902)
fixes #11894
2021-03-25 13:57:57 -07:00
Klaus Post
b383522743 fix error could not read /proc on windows. (#11868)
Bonus: Prealloc reasonable sizes for metrics.
2021-03-25 12:58:43 -07:00
Andreas Auernhammer
6d42036dd4 highwayhash: update to latest version containing an arm64 fix (#11901)
This commit updates the highwayhash version to `v1.0.2`
that fixes a critical issue on arm64.
2021-03-25 11:44:58 -07:00
Aditya Manthramurthy
b4d8bcf644 Converge PolicyDBGet functions in IAM (#11891) 2021-03-25 00:38:15 -07:00
Harshavardhana
d7f32ad649 xl: avoid sending Delete() remote call for fully successful runs
an optimization to avoid extra syscalls in PutObject(),
which otherwise add to our PutObject() response times.
2021-03-24 17:32:12 -07:00
Aditya Manthramurthy
906d68c356 Fix LDAP policy application on user policy (#11887) 2021-03-24 12:29:25 -07:00
Klaus Post
749e9c5771 metrics: Add canceled requests (#11881)
Add metric for canceled requests
2021-03-24 10:25:27 -07:00
Harshavardhana
410e84d273 xl: add checks for minioTmpMetaBucket in CreateFile 2021-03-24 09:36:10 -07:00
Harshavardhana
75741dbf4a xl: remove cleanupDir instead use Delete() (#11880)
use a single call to remove directly at disk
instead of doing recursively at network layer.
2021-03-24 09:08:05 -07:00
Anis Elleuch
fad7b27f15 metrics: Change type of minio_s3_requests_waiting_total to gauge (#11884) 2021-03-24 09:06:37 -07:00
Harshavardhana
79564656eb xl: CreateFile shouldn't prematurely timeout (#11878)
For large objects, a single PUT operation taking more than
3 minutes can time out prematurely as the 'ResponseHeader'
timeout of 3 minutes is hit. Avoid this by keeping the
connection active during the CreateFile phase.
2021-03-24 09:05:03 -07:00
Harshavardhana
21cfc4aa49 Revert "xl: CreateFile shouldn't prematurely timeout (#11854)"
This reverts commit 922c7b57f5.
2021-03-23 23:47:45 -07:00
Harshavardhana
e80239a661 simplify OS instrumentation remove functions for global variables 2021-03-23 22:32:44 -07:00
Ritesh H Shukla
6a2ed44095 fix: optionally enable tracing posix calls 2021-03-23 22:23:08 -07:00
Aditya Manthramurthy
8adfeb0d84 fix: AccountInfo API for LDAP users (#11874)
Also, ensure admin APIs auth additionally validates groups
2021-03-23 17:39:20 -07:00
Harshavardhana
d23485e571 fix: LDAP groups handling and group mapping (#11855)
comprehensively handle group mapping for LDAP
users across the IAM subsystem.
2021-03-23 15:15:51 -07:00
Harshavardhana
da70e6ddf6 avoid healObjects recursively healing at empty path (#11856)
baseDirFromPrefix(prefix) incorrectly uses an empty path
for object names without a parent directory, leading
to long listings at various paths that are not useful
for healing - avoid this listing completely if "baseDir"
returns empty and simply use the "prefix" as is.

this improves startup performance significantly
2021-03-23 07:57:07 -07:00
Harshavardhana
922c7b57f5 xl: CreateFile shouldn't prematurely timeout (#11854)
For large objects, a single PUT operation taking more than
3 minutes can time out prematurely as the 'ResponseHeader'
timeout of 3 minutes is hit. Avoid this by keeping the
connection active during the CreateFile phase.
2021-03-22 18:25:05 -07:00
Harshavardhana
726d80dbb7 fix: merge duplicate keys in post policy (#11843)
some SDKs might incorrectly send duplicate
entries for keys such as "conditions"; the Go
stdlib JSON unmarshal does not reject
duplicate keys - it silently drops earlier
duplicates and preserves only the last entry.

This can lead to issues where a policy JSON
while being valid might not properly apply
the required conditions, allowing situations
where POST policy JSON would end up allowing
uploads to unauthorized buckets and paths.

This PR fixes this properly.
2021-03-20 22:16:30 -07:00
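
For illustration only (not MinIO's code, and the helper name is made up): a minimal Go sketch of why duplicate keys are a problem with encoding/json and how token-level decoding makes them visible; this sketch only detects duplicates at the top level rather than merging them as the commit does.

```
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// hasDuplicateTopLevelKeys walks the object with json.Decoder tokens so
// repeated keys are visible, unlike a plain json.Unmarshal into a map.
func hasDuplicateTopLevelKeys(doc string) (bool, error) {
	dec := json.NewDecoder(strings.NewReader(doc))
	tok, err := dec.Token()
	if err != nil || tok != json.Delim('{') {
		return false, fmt.Errorf("expected a JSON object")
	}
	seen := make(map[string]bool)
	for dec.More() {
		keyTok, err := dec.Token()
		if err != nil {
			return false, err
		}
		key, ok := keyTok.(string)
		if !ok {
			return false, fmt.Errorf("unexpected token %v", keyTok)
		}
		if seen[key] {
			return true, nil
		}
		seen[key] = true
		// Skip over the value belonging to this key.
		var v json.RawMessage
		if err := dec.Decode(&v); err != nil {
			return false, err
		}
	}
	return false, nil
}

func main() {
	dup, _ := hasDuplicateTopLevelKeys(`{"conditions": [], "conditions": []}`)
	fmt.Println(dup) // true
}
```
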
Ritesh H Shukla
23b03dadb8 Add process uptime metric (#11844) 2021-03-20 21:23:27 -07:00
Andreas Auernhammer
7b3719c17b crypto: simplify Context encoding (#11812)
This commit adds a `MarshalText` implementation
to the `crypto.Context` type.
The `MarshalText` implementation replaces the
`WriteTo` and `AppendTo` implementation.

It is slightly slower than the `AppendTo` implementation
```
goos: darwin
goarch: arm64
pkg: github.com/minio/minio/cmd/crypto
BenchmarkContext_AppendTo/0-elems-8         	381475698	         2.892 ns/op	       0 B/op	       0 allocs/op
BenchmarkContext_AppendTo/1-elems-8         	17945088	        67.54 ns/op	       0 B/op	       0 allocs/op
BenchmarkContext_AppendTo/3-elems-8         	 5431770	       221.2 ns/op	      72 B/op	       2 allocs/op
BenchmarkContext_AppendTo/4-elems-8         	 3430684	       346.7 ns/op	      88 B/op	       2 allocs/op
```
vs.
```
BenchmarkContext/0-elems-8         	135819834	         8.658 ns/op	       2 B/op	       1 allocs/op
BenchmarkContext/1-elems-8         	13326243	        89.20 ns/op	     128 B/op	       1 allocs/op
BenchmarkContext/3-elems-8         	 4935301	       243.1 ns/op	     200 B/op	       3 allocs/op
BenchmarkContext/4-elems-8         	 2792142	       428.2 ns/op	     504 B/op	       4 allocs/op
```

However, the `AppendTo` benchmark used a pre-allocated buffer. While
this improves its performance it does not match the actual usage of
`crypto.Context` which is passed to a `KMS` and always encoded into
a newly allocated buffer.

Therefore, this change seems acceptable since it should not impact the
actual performance but reduces the overall code for Context marshaling.
2021-03-20 02:48:48 -07:00
Harshavardhana
9a6487319a remove MINIO_IO_DEADLINE support (#11841)
this feature in actual deployment was found
to be not that useful, remove support for this
for now.
2021-03-20 02:47:04 -07:00
Aditya Manthramurthy
94ff624242 Fix querying LDAP group/user policy (#11840) 2021-03-20 02:37:52 -07:00
Anis Elleuch
98ff91b484 xl: Reduce usage of isDirEmpty() (#11838)
When an object is removed, its parent directory is inspected to check if
it is empty to remove if that is the case.

However, we can use os.Remove() directly since it is only able to remove
a file or an empty directory.
2021-03-19 15:42:01 -07:00
Anis Elleuch
4d86384dc7 xl: Remove non needed check for empty dir (#11835)
RenameData renames xl.meta and data dir and removes the parent directory
if empty, however, there is a duplicate check for empty dir, since the
parent dir of xl.meta is always the same as the data-dir.
2021-03-19 12:26:53 -07:00
mailsmail
27eb4ae3bc fix: sql cast function when converting to float (#11817) 2021-03-19 09:14:38 -07:00
Ritesh H Shukla
b5dcaaccb4 Introduce metrics caching for performant metrics (#11831) 2021-03-19 00:04:29 -07:00
Anis Elleuch
0843280dc3 lifecycle: Support old BucketLifecycleConfiguration tag (#11828)
Some old AWS SDKs send BucketLifecycleConfiguration as the root tag in
the bucket lifecycle document. This PR will support both
LifecycleConfiguration and BucketLifecycleConfiguration.
2021-03-18 22:18:35 -07:00
Harshavardhana
61a1ea60c2 add missing java headless jdk in mint 2021-03-18 20:38:50 -07:00
Harshavardhana
b92a220db1 fix: handle weird drives sporadic read O_DIRECT behavior (#11832)
on freshReads if drive returns errInvalidArgument, we
should simply turn-off DirectIO and read normally, there
are situations in k8s like environments where the drives
behave sporadically in a single deployment and may not
have been implemented properly to handle O_DIRECT for
reads.
2021-03-18 20:16:50 -07:00
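
A simplified, Linux-only sketch (not the MinIO xl-storage code; the function name is illustrative) of the fallback described above: try O_DIRECT first and reopen without it if the drive rejects the flag with EINVAL.

```
//go:build linux

package main

import (
	"errors"
	"os"
	"syscall"
)

// openForRead attempts an O_DIRECT open and falls back to a buffered
// (page-cache) open when the underlying drive/filesystem rejects it.
func openForRead(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_DIRECT, 0)
	if err == nil {
		return f, nil
	}
	if errors.Is(err, syscall.EINVAL) {
		// The drive/filesystem rejected O_DIRECT; read normally instead.
		return os.OpenFile(path, os.O_RDONLY, 0)
	}
	return nil, err
}
```
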
Shireesh Anjal
be5910b87e fix: bucket / object count and size returned as 0 (#11825) 2021-03-18 14:40:21 -07:00
Harshavardhana
51a8619a79 [feat] Add configurable deadline for writers (#11822)
This PR adds deadlines per Write() calls, such
that slow drives are timed-out appropriately and
the overall responsiveness for Writes() is always
up to a predefined threshold providing applications
sustained latency even if one of the drives is slow
to respond.
2021-03-18 14:09:55 -07:00
iternity-dotcom
d46c3c07a8 Add main_test.go to run system tests with coverage (#11783)
- Build and run the test executable:
```
$ go test -tags testrunmain -covermode count
	-coverpkg="./..." -c -tags testrunmain
$ APP_ARGS="server /tmp/test" ./minio.test
	-test.run "^TestRunMain$"
	-test.coverprofile coverage.cov
```

- Or run the system under test just by calling go test
```
$ APP_ARGS="server /tmp/test" go test
	-cover
	-tags testrunmain
	-coverpkg="./..."
	-covermode count
	-coverprofile=coverage.cov
```

- Run System-Tests (when using GitBash prefix this 
  line with MSYS_NO_PATHCONV=1) Note the 
  SERVER_ENDPOINT must be reachable from 
  inside the docker container (so don't use localhost!)
```
$ docker run
	-e MINT_MODE=full
	-e SERVER_ENDPOINT=192.168.47.11:9000
	-e ACCESS_KEY=minioadmin
	-e SECRET_KEY=minioadmin
	-v /tmp/mint/log:/mint/log
	minio/mint
```

- Stop system under test  by sending SIGTERM
```
$ ctrl+c
```

- Transform coverage file to HTML
```
$ go tool cover -html=./coverage.cov -o coverage.html
```
2021-03-18 13:14:44 -07:00
Anis Elleuch
14d89eaae4 mrf: Enhance behavior for better results (#11788)
MRF was starting to heal when it receives a disk connection event, which
is not good when a node having multiple disks reconnects to the cluster.

Besides, MRF needs Remove healing option to remove stale files.
2021-03-18 11:19:02 -07:00
ebozduman
32b088a2ff No retries if minio server is down/connection refused err (#11809) 2021-03-18 11:05:48 -07:00
Harshavardhana
eed3b66d98 dsync: use refresh timer properly to avoid leaks (#11820)
timer pattern should always involve a 'Stop()/Reset()' otherwise
 `time.NewTimer(duration).C` will always leak.
2021-03-17 16:37:13 -07:00
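
A minimal sketch of the timer pattern the commit refers to (illustrative, not the dsync code): reuse a single timer and Stop()/Reset() it, instead of allocating `time.NewTimer(d).C` on every loop iteration, which leaks the timer until it fires.

```
package main

import (
	"context"
	"time"
)

// refreshLoop runs fn roughly every interval until ctx is cancelled,
// reusing one timer instead of creating a new one per iteration.
func refreshLoop(ctx context.Context, interval time.Duration, fn func()) {
	t := time.NewTimer(interval)
	defer t.Stop() // always release the timer when the loop exits
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			fn()
			// Reset only after the work is done so long-running work
			// does not cause overlapping firings.
			t.Reset(interval)
		}
	}
}
```
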
Harshavardhana
add3cd4e44 allow configuring delete cleanup interval from default 10minutes (#11818) 2021-03-17 15:15:58 -07:00
Harshavardhana
60b0f2324e storage write call path optimizations (#11805)
- write with O_DSYNC instead of O_DIRECT for smaller
  objects to avoid unaligned double Write() situations
  that may arise for smaller objects < 128KiB
- avoid fallocate() as it's not useful since we do not
  use Append() semantics anymore; fallocate is not useful
  for streaming I/O, so we can save a syscall
- createFile() doesn't need to validate the `bucket` name
  with an Lstat() call since createFile() is only used
  to write at `minioTmpBucket`
- use io.Copy() for unaligned writes to allow the use of
  ReadFrom() on *os.File, providing zero-buffer writes
2021-03-17 09:38:38 -07:00
Anis Elleuch
0eb146e1b2 add additional metrics per disk API latency, API call counts (#11250)
```
mc admin info --json
```

provides these details for now; we shall eventually
expose this at the Prometheus level.

Co-authored-by: Harshavardhana <harsha@minio.io>
2021-03-16 20:06:57 -07:00
Minio Trusted
b379ca3bb0 Update yaml files to latest version RELEASE.2021-03-17T02-33-02Z 2021-03-17 02:56:28 +00:00
Andreas Auernhammer
e197800f90 s3v4: read and verify S3 signature v4 chunks separately (#11801)
This commit fixes a security issue in the signature v4 chunked
reader. Before, the reader returned unverified data to the caller
and would only verify the chunk signature once it has encountered
the end of the chunk payload.

Now, the chunk reader reads the entire chunk into an in-memory buffer,
verifies the signature and then returns data to the caller.

In general, this is a common security problem. When verifying data
streams, the verifier MUST NOT return data to the upper layers / its
callers as long as it has not verified the current data chunk / data
segment:
```
func (r *Reader) Read(buffer []byte) (int, error) {
   if err := r.readNext(r.internalBuffer); err != nil {
      return 0, err
   }
   if err := r.verify(r.internalBuffer); err != nil {
      return 0, err
   }
   return copy(buffer, r.internalBuffer), nil
}
```
2021-03-16 13:33:40 -07:00
Ravind Kumar
980311fdfd Fix STANDARD defaults, point to new docs site. (#11800) 2021-03-16 12:04:28 -07:00
Klaus Post
771dea175c erasure pools enable faster checks for file not found (#11799)
For operations that require the object to exist, make it possible to
detect if the file isn't found in *any* pool.

This will allow these to return the error early without having to re-check.
2021-03-16 11:02:20 -07:00
Anis Elleuch
fa94682e83 policy: Add Merge API (#11793)
This commit adds a new API in pkg/bucket/policy package called
Merge to merge multiple policies of a user or a group into one
policy document.
2021-03-16 08:50:36 -07:00
Harshavardhana
6160188bf3 fix: erasure index based reading based on actual ParityBlocks (#11792)
in some setups with ordering issues in drive configuration,
we should rely on expected parityBlocks instead of `len(disks)/2`
2021-03-15 20:03:13 -07:00
Klaus Post
e5a1a2a974 s3 select: fix date_diff behavior (#11786)
Fixes #11785 and adds tests for samples given.
2021-03-15 14:15:52 -07:00
Steve Wills
642ba3f2d6 fix: runtime issue on FreeBSD due to missing O_NOATIME/O_DSYNC support (#11790)
See also:

https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=253937
2021-03-15 14:02:36 -07:00
Harshavardhana
4d80de899a fix: mips 32bit compilation issue (#11775)
fixes #11768
2021-03-15 06:02:09 -07:00
Harshavardhana
afbd3e41eb add missing principalId in web notifications (#11777)
fixes #11561
2021-03-13 10:52:43 -08:00
Poorna Krishnamoorthy
5e003549cc Replication: Enforce DeleteMarker disable setting (#11720)
This PR also enforces DeleteReplication
disable setting
2021-03-13 10:28:35 -08:00
Nitish Tiwari
7fa3e4106b Add consoleAdmin as a default canned policy (#11770) 2021-03-12 12:51:43 -08:00
Philip Brown
75db500e85 cmd/os-readdir_other.go - return nil with err (#11772) 2021-03-12 07:22:25 -08:00
Harshavardhana
910097bbbc update browser assets for react-qr-code 2021-03-11 16:58:15 -08:00
Minio Trusted
afd346417d Update yaml files to latest version RELEASE.2021-03-12T00-00-47Z 2021-03-12 00:23:57 +00:00
Harshavardhana
feafccf007 handle trimming '/' if present in the object names (#11765)
- MultipleDeletes should handle the '/' prefix for object names
- Trimming the slash alone is enough for ListObjects()
  prefix and markers

fixes #11769
2021-03-11 13:57:03 -08:00
S Santhosh Nagaraj
9b54fcdf12 feat: Add QR Code to Share Object Modal (#11735)
Co-authored-by: Kaan Kabalak <kaankabalak@gmail.com>
2021-03-11 11:21:45 -08:00
Anis Elleuch
f92b7a5621 Browser: Shared link has content-disposition header (#11712)
The shared link will be automatically downloadable when the user opens
the shared link in a browser.
2021-03-10 23:02:16 -08:00
Poorna Krishnamoorthy
c25e75f0b5 Fix redact LDAP password properly (#11762)
fixes #11742; the previous pull request #11750 fixed only the web trace
2021-03-10 11:05:38 -08:00
Harshavardhana
3ffe520643 add release build-arg to docker multiarch builds (#11752) 2021-03-10 09:41:44 -08:00
Klaus Post
952b0f111d Update S2 compression (#11753)
Relevant updates:

* Less allocations on decode: https://github.com/klauspost/compress/pull/322
* Fixed rare out-of-bounds write on amd64.
* ARM64 decompression assembly. Around 2x output speed. https://github.com/klauspost/compress/pull/324
* Speed up decompression on non-assembly platforms. https://github.com/klauspost/compress/pull/328

Upgrade cpuid to match simdjson.
2021-03-10 09:41:29 -08:00
Harshavardhana
777344a594 add release build-arg to docker multiarch builds (#11754)
additional paths to ignore for healing
2021-03-10 09:38:35 -08:00
Minio Trusted
9d118b372e Update yaml files to latest version RELEASE.2021-03-10T05-11-33Z 2021-03-10 05:34:48 +00:00
Poorna Krishnamoorthy
878bc6c72b Redact LDAP password if any in request trace (#11750)
Fixes: #11742
2021-03-09 14:43:16 -08:00
Klaus Post
fdc2f69218 truncate xl.meta files upon rewrites (#11749)
If the destination file exists and is larger, junk data will be left at the end of the file.
2021-03-09 14:42:24 -08:00
Anis Elleuch
0d124095ea lc: Return expiration header only when version id is unspecified (#11718)
Follow S3 specification to return Expiration header in HEAD/GET call
only when version-id is not passed in the request.
2021-03-09 13:19:08 -08:00
Harshavardhana
691035832a fix: normalize object layer inputs (#11534)
For cases where applications make requests
with `//` in object names, make sure that all
are normalized to `/` and that any leading '/'
prefixes are removed, to ensure a consistent
view across all operations.
2021-03-09 12:58:22 -08:00
Anis Elleuch
eac66e67ec Use maximum parity for config files (#11740)
Some deployments have low parity (EC:2), but we really do not need to
save our config data with the same parity configuration.

N/2 would be better to keep MinIO configurations intact when an
unexpected number of drives fail.
2021-03-09 10:19:47 -08:00
Anis Elleuch
57f3ed22d4 erasure: Reduce the interval of cleaning up .trash folder (#11741)
Reduce from 30 to 10 minutes.
2021-03-09 09:45:38 -08:00
Poorna Krishnamoorthy
2f29719e6b resize replication worker pool dynamically after config update (#11737) 2021-03-09 02:56:42 -08:00
Andreas Auernhammer
209fe61dcc vault: disable Hashicorp Vault with opt-in (#11711)
This commit disables Hashicorp Vault support
but provides a way to temporarily enable it via
the `MINIO_KMS_VAULT_DEPRECATION=off` setting.

Vault support was deprecated long ago and this
commit just requires users to take action if
they maintain a Vault integration.
2021-03-09 00:02:35 -08:00
Harshavardhana
8ecffdb7a7 Revert "Revert "heal: Heal bucket metadata when a fresh disk is inserted (#11734)""
This reverts commit 806df164b2.
2021-03-08 16:12:17 -08:00
Harshavardhana
806df164b2 Revert "heal: Heal bucket metadata when a fresh disk is inserted (#11734)"
This reverts commit 64662a49ff.
2021-03-08 14:43:24 -08:00
Klaus Post
4ac9ed4248 CopyObject: Do not remove crypto info when compressed (#11702)
Removing crypto info makes it impossible to copy encrypted+compressed objects.

Disable destination compression when encrypted.
2021-03-08 12:57:54 -08:00
Harshavardhana
3d8c512bba update browser package.json 2021-03-08 11:35:37 -08:00
Klaus Post
3ff5f55dcb Fetch fileinfo concurrently (#11700)
For non-erasure setups fetch up to 10 fileinfos concurrently.

Fixes #11625
2021-03-08 11:30:43 -08:00
Max Xu
097e5eba9f feat: remove go-bindata-assetfs in favor of embed by upgrading to go1.16 (#11733) 2021-03-08 11:26:43 -08:00
Andreas Auernhammer
ba6930bb13 fips: always enable AES in FIPS mode when using madmin (#11732)
This commit adds FIPS-specifc build tags to the madmin
package. When madmin is compiled with `--tags "fips"`
it will always use AES-GCM for encryption - not just
when an optimized AES implementation is available.
2021-03-08 10:58:02 -08:00
Anis Elleuch
64662a49ff heal: Heal bucket metadata when a fresh disk is inserted (#11734)
Replacing disk with a fresh one never heals bucket metadata (policy,
notification, etc..). This commit fixes the issue.
2021-03-08 10:54:13 -08:00
Harshavardhana
78e867e145 ignore healing .trash, .metacache and .multipart paths (#11725) 2021-03-07 09:38:31 -08:00
Harshavardhana
9ccc483df6 [feat]: change erasure coding default block size from 10MiB to 1MiB (#11721)
major performance improvements in range GETs to avoid large
read amplification when ranges are tiny and random

```
-------------------
Operation: GET
Operations: 142014 -> 339421
Duration: 4m50s -> 4m56s
* Average: +139.41% (+1177.3 MiB/s) throughput, +139.11% (+658.4) obj/s
* Fastest: +125.24% (+1207.4 MiB/s) throughput, +132.32% (+612.9) obj/s
* 50% Median: +139.06% (+1175.7 MiB/s) throughput, +133.46% (+660.9) obj/s
* Slowest: +203.40% (+1267.9 MiB/s) throughput, +198.59% (+753.5) obj/s
```

TTFB from 10MiB BlockSize
```
* First Access TTFB: Avg: 81ms, Median: 61ms, Best: 20ms, Worst: 2.056s
```

TTFB from 1MiB BlockSize
```
* First Access TTFB: Avg: 22ms, Median: 21ms, Best: 8ms, Worst: 91ms
```

Full object reads however do see a slight change which won't be
noticeable in real world, so not doing any comparisons

TTFB still had improvements with full object reads with 1MiB

```
* First Access TTFB: Avg: 68ms, Median: 35ms, Best: 11ms, Worst: 1.16s
```

v/s

TTFB with 10MiB
```
* First Access TTFB: Avg: 388ms, Median: 98ms, Best: 20ms, Worst: 4.156s
```

This change should affect all new uploads; previous uploads should
continue to work as usual. But dramatic improvements can
be seen with these changes.
2021-03-06 14:09:34 -08:00
Anis Elleuch
abce040088 fix: Remove repetitive IAM ready message (#11723)
"IAM initialization complete" is printed each 5 minutes, avoid this by
printing it only during the first initialization of IAM.
2021-03-06 09:27:46 -08:00
Anis Elleuch
558762bdf6 iam: Return a slice of policies for a group (#11722)
A group can have multiple policies; a user subscribed to readwrite &
diagnostics can perform S3 operations as well as admin operations.
However, the current code returns only one policy per group.
2021-03-06 09:27:06 -08:00
Harshavardhana
d971061305 use listPathRaw for HealObjects() instead of expensive WalkVersions() (#11675) 2021-03-06 09:25:48 -08:00
Andreas Auernhammer
509bcc01ad fips: do not use SHA-3 when building a FIPS-140 2 binary (#11710)
This commit disables SHA-3 for OpenID when building a
FIPS 140-2 compatible binary. While SHA-3 is a
cryptographic hash function accepted by NIST, there is no
FIPS 140-2 compliant implementation available when
using the boringcrypto Go branch.

Therefore, SHA-3 must not be used when building
a FIPS 140-2 binary.
2021-03-05 20:43:42 -08:00
Anis Elleuch
7ea95fcec8 Add mint versioning tests (#11500)
Co-authored-by: Harshavardhana <harsha@minio.io>
2021-03-05 19:15:42 -08:00
Krishnan Parthasarathi
79b0d056a2 lifecycle: don't transition delete markers (#11692) 2021-03-05 15:29:27 -08:00
Harshavardhana
ec547c0fa8 enable race detector CI for macos-latest (#11715) 2021-03-05 14:16:23 -08:00
Krishnan Parthasarathi
bcf9825082 Data usage should account for transitioned objects (#11717) 2021-03-05 14:15:53 -08:00
Harshavardhana
651487507a fix: Merge() should merge and return a copy (#11714)
fixes #11713
2021-03-05 09:42:46 -08:00
sgandon
124816f6a6 fix: IAM initialization failing with a large number of users/policies (#11701) 2021-03-05 08:36:16 -08:00
Klaus Post
fa9cf1251b Improve healing and reporting (#11312)
* Provide information on *actively* healing, buckets healed/queued, objects healed/failed.
* Add concurrent healing of multiple sets (typically on startup).
* Add bucket level resume, so restarts will only heal non-healed buckets.
* Print summary after healing a disk is done.
2021-03-04 14:36:23 -08:00
Harshavardhana
97e7a902d0 fix: shellcheck mint shellscript 2021-03-04 14:28:13 -08:00
Harshavardhana
d73d756a80 fix: incorrect errors thrown by lint (#11699)
fixes #11698
2021-03-04 14:27:38 -08:00
Aditya Manthramurthy
7488c77e7c Test LDAP connection configuration at startup (#11684) 2021-03-04 12:17:36 -08:00
Harshavardhana
786585009e fix: capture disks when entire peer is offline (#11697)
currently when one of the peers is down, the
drives from that peer are reported as '0/0'
offline; instead we should capture/filter the
drives from the peer and populate them appropriately
such that `mc admin info` displays correct info.
2021-03-04 10:07:05 -08:00
Anis Elleuch
7be7109471 locking: Add Refresh for better locking cleanup (#11535)
Co-authored-by: Anis Elleuch <anis@min.io>
Co-authored-by: Harshavardhana <harsha@minio.io>
2021-03-03 18:36:43 -08:00
Minio Trusted
464fa08f2e Update yaml files to latest version RELEASE.2021-03-04T00-53-13Z 2021-03-04 01:15:49 +00:00
Klaus Post
c3217bd6eb Use actual size for buffer selection (#11687)
For compressed inputs, this will be -1, but the object may be small.
2021-03-03 16:28:10 -08:00
Andreas Auernhammer
f14cc6c943 etag: add FromContentMD5 to parse content-md5 as ETag (#11688)
This commit adds the `FromContentMD5` function to
parse a client-provided content-md5 as ETag.

Further, it also adds multipart ETag computation
for future needs.
2021-03-03 12:58:28 -08:00
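
As background for the idea (a sketch only; the helper below is illustrative and not the actual pkg/etag API): the Content-MD5 request header carries a base64-encoded MD5 digest, while a single-part S3 ETag is the same digest rendered as hex.

```
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
)

// etagFromContentMD5 converts a client-supplied Content-MD5 header value
// (base64 of the 16-byte MD5 sum) into the hex form used by ETags.
func etagFromContentMD5(h http.Header) (string, error) {
	v := h.Get("Content-Md5")
	if v == "" {
		return "", nil // header not provided by the client
	}
	sum, err := base64.StdEncoding.DecodeString(v)
	if err != nil || len(sum) != 16 {
		return "", fmt.Errorf("invalid Content-MD5 %q", v)
	}
	return hex.EncodeToString(sum), nil
}

func main() {
	h := http.Header{}
	h.Set("Content-Md5", "1B2M2Y8AsgTpgAmY7PhCfg==") // MD5 of an empty body
	etag, _ := etagFromContentMD5(h)
	fmt.Println(etag) // d41d8cd98f00b204e9800998ecf8427e
}
```
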
Harshavardhana
2c198ae7b6 fix: prometheus metrics disks_online count when disks are down (#11689)
prometheus metrics was using total disks instead
of online disk count, when disks were down, this
PR fixes this and also adds a new metric for
total_disk_count
2021-03-03 11:18:41 -08:00
Poorna Krishnamoorthy
690434514d Avoid notification event for replicas (#11683)
Creating notification events for replica creation
is not particularly useful to send as the notification
event generated at source already includes replication
completion events.

For applications using replica cluster as failover, avoiding
duplicate notifications for replica event will allow seamless
failover.
2021-03-03 11:13:31 -08:00
Harshavardhana
039f59b552 fix: missing user policy enforcement in PostPolicyHandler (#11682) 2021-03-03 08:47:08 -08:00
Harshavardhana
c6a120df0e fix: Prometheus metrics to re-use storage disks (#11647)
re-use storage disks for all `mc admin server info`
calls as well; implement a new LocalStorageInfo() API
call at the ObjectLayer to look up local disks' StorageInfo

also fixes bugs where there were double calls to StorageInfo()
2021-03-02 17:28:04 -08:00
Klaus Post
cd9e30c0f4 IAM: Block while loading users (#11671)
While starting up, a request that needs all IAM data will start another load operation if the first one from startup hasn't finished. This slows down both operations.

Block these requests until initial load has completed.

Blocking calls will be ListPolicies, ListUsers, ListServiceAccounts, ListGroups - and the calls that eventually trigger these. These will wait for the initial load to complete.

Fixes issue seen in #11305
2021-03-02 17:08:25 -08:00
Harshavardhana
f96d4cf7d3 fix: do not deny admins the ability to change other users' passwords
fixes a regression from #11680
2021-03-02 17:02:32 -08:00
Harshavardhana
879599b0cf fix: enforce deny if present for implicit permissions (#11680)
The implicit permission for any user is to be allowed to
change their own password; we need to restrict this
further - even if there is an implicit allow for this
scenario, we have to honor Deny statements if they
are specified.
2021-03-02 15:35:50 -08:00
Harshavardhana
b1bb3f7016 [feat]: implement GetBucketPolicyStatus API (#11673)
additionally add more APIs to the notImplemented
list and adjust routing rules appropriately
2021-03-01 23:10:33 -08:00
Anis Elleuch
e8d8dfa3ae Add metric for internode RPC calls errors (#11669) 2021-03-01 12:31:33 -08:00
Nitish Tiwari
bbd1244a88 Add support for mTLS for Audit log target (#11645) 2021-03-01 09:19:13 -08:00
Klaus Post
10bdb78699 fix: listObjectVersions Include object in marker (#11562)
ListObjectVersions would skip past the object in the marker when version id is specified. 
Make `listPath` return the object with the marker and truncate it if not needed.

Avoid having to parse unintended objects to find a version marker.
2021-03-01 08:12:02 -08:00
Shireesh Anjal
289b22d911 fix: pool number not added for one server (#11670)
The previous code was iterating over replies from peers and assigning
pool numbers to them, thus failing to add it for the local server.

Fixed by iterating over the server properties of all the servers
including the local one.
2021-03-01 08:09:43 -08:00
Harshavardhana
0b9c17443e update gopsutil to use the v3 API (#11638) 2021-03-01 00:15:46 -08:00
Bala FA
23f7ab40b3 Add PoolNumber field to madmin.ServerProperties (#11327) 2021-02-28 21:26:28 -08:00
Minio Trusted
e3f8830ab7 Update yaml files to latest version RELEASE.2021-03-01T04-20-55Z 2021-03-01 04:43:28 +00:00
Harshavardhana
2f4af09c01 fix: allow changes to readAllData to decrement activeCount() 2021-02-28 20:09:23 -08:00
Harshavardhana
37960cbc2f fix: avoid writing more content on network with O_DIRECT reads (#11659)
An io.LimitReader was missing for the 'length'
parameter of ranged requests, which would cause clients to
get truncated responses and errors.

fixes #11651
2021-02-28 15:33:03 -08:00
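
A minimal sketch (illustrative, not the MinIO handler) of bounding a ranged response with io.LimitReader so that no more than `length` bytes of the underlying stream are sent to the client.

```
package main

import (
	"io"
	"net/http"
)

// writeRange copies at most `length` bytes of r to w, guarding against a
// reader that might yield more data than the requested range.
func writeRange(w http.ResponseWriter, r io.Reader, length int64) error {
	_, err := io.Copy(w, io.LimitReader(r, length))
	return err
}
```
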
cbows
c67d1bf120 add unauthenticated lookup-bind mode to LDAP identity (#11655)
Closes #11646
2021-02-28 12:57:31 -08:00
Klaus Post
c5b3a675fa Block profiling tweaks (#11612)
The base profiles contain no valuable data; don't record them.

Reduce the block rate by 2 orders of magnitude; this should still capture just as valuable data with less CPU strain.
2021-02-27 09:22:14 -08:00
Harshavardhana
b690304eed use faster way for siphash (#11640) 2021-02-26 16:53:06 -08:00
Harshavardhana
9171d6ef65 rename all references from crawl -> scanner (#11621) 2021-02-26 15:11:42 -08:00
Harshavardhana
6386b45c08 [feat] use rename instead of recursive deletes (#11641)
most of the delete calls today spend time in
a blocking operation where multiple calls need
to be recursively sent to delete the objects;
instead we can use a rename operation to atomically
move the objects from the namespace to `tmp/.trash`

we can schedule deletion of objects at this
location once every 15-30 mins and we can also add
wait times between each delete operation.

this allows us to make deletes faster as well as
less chatty on the drives; each server locally runs
a goroutine which cleans this up regularly.
2021-02-26 09:52:27 -08:00
Andreas Auernhammer
1f659204a2 remove GetObject from ObjectLayer interface (#11635)
This commit removes the `GetObject` method
from the `ObjectLayer` interface.

The `GetObject` method is no longer used by
the HTTP handlers implementing the high-level
S3 semantics. Instead, they use the `GetObjectNInfo`
method which returns both, an object handle as well
as the object metadata.

Therefore, it is no longer necessary that a concrete
`ObjectLayer` implements `GetObject`.
2021-02-26 09:52:02 -08:00
Harshavardhana
f9f6fd0421 fix: service account permissions generated from LDAP user (#11637)
service accounts generated from an LDAP parent user
did not inherit the correct permissions; this PR fixes
this fully.
2021-02-25 13:49:59 -08:00
Klaus Post
85620dfe93 use bucket in path in distribution hash (#11634)
Use bucket in erasure distribution hash.

For the rare cases where objects with the same names are uploaded to many buckets.
2021-02-25 10:11:31 -08:00
Harshavardhana
a8e4f64ff3 Revert "fix: remove persistence layer for metacache store in memory (#11538)"
This reverts commit b23659927c.
2021-02-24 22:24:51 -08:00
Krishnan Parthasarathi
ca5c6e3160 fix: translate empty versionID string to null version where appropriate (#11629)
We store the null version as an empty string. We should translate it to the null
version for buckets with versioning suspended too.
2021-02-24 18:39:10 -08:00
Harshavardhana
b23659927c fix: remove persistence layer for metacache store in memory (#11538)
store the cache in memory instead of on disk to avoid large
write amplification for list-heavy workloads, and let
it auto-expire.
2021-02-24 15:51:41 -08:00
Minio Trusted
b912e9ab41 Update yaml files to latest version RELEASE.2021-02-24T18-44-45Z 2021-02-24 19:08:36 +00:00
Andreas Auernhammer
c1a49be639 use crypto/sha256 for FIPS 140-2 compliance (#11623)
This commit replaces the usage of
github.com/minio/sha256-simd with crypto/sha256
of the standard library in all non-performance
critical paths.

This is necessary for FIPS 140-2 compliance which
requires that all cryptographic primitives are implemented
by a FIPS-validated module.

Go can use the Google FIPS module. The boringcrypto
branch of the Go standard library uses the BoringSSL
FIPS module to implement crypto. primitives like AES
or SHA256.

We only keep github.com/minio/sha256-simd when computing
the content-SHA256 of an object. Therefore, this commit
relies on a build tag `fips`.

When MinIO is compiled without the `fips` flag it will
use github.com/minio/sha256-simd. When MinIO is compiled
with the fips flag (go build --tags "fips") then MinIO
uses crypto/sha256 to compute the content-SHA256.
2021-02-24 09:00:15 -08:00
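
A generic illustration of the build-tag technique the commit describes, using the modern `//go:build` directive form; the package and function names below are made up and are not the actual MinIO files. With `go build --tags "fips"` this file is compiled, while a sibling file guarded by `//go:build !fips` would provide the simd-accelerated variant.

```
//go:build fips

package hashutil

import (
	"crypto/sha256"
	"hash"
)

// NewSHA256 returns the standard-library implementation which, per the
// commit above, is backed by the BoringSSL FIPS module when building
// with the boringcrypto branch.
func NewSHA256() hash.Hash { return sha256.New() }
```
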
Klaus Post
03172b89e2 Ensure cache has finished deserializing (#11620)
Make sure that response has been fully deserialized before returning.
2021-02-24 02:59:49 -08:00
Harshavardhana
b517c791e9 [feat]: use DSYNC for xl.meta writes and NOATIME for reads (#11615)
We are better off using O_DSYNC instead of O_SYNC,
since we are only ever interested in the data being
persisted to disk, not the associated filesystem metadata.

For reads we ask customers to disable atime updates
(mount with noatime), but we can instead proactively use
the O_NOATIME flag to avoid atime updates upon reads.
2021-02-24 00:14:16 -08:00
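
A Linux-only sketch of the open flags described above (illustrative; not the MinIO xl-storage code). Note that O_NOATIME generally requires the caller to own the file or hold CAP_FOWNER.

```
//go:build linux

package main

import (
	"os"
	"syscall"
)

// openMetaForWrite persists file data on every write (O_DSYNC) without
// forcing the full metadata flush that O_SYNC implies.
func openMetaForWrite(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|syscall.O_DSYNC, 0o644)
}

// openMetaForRead avoids access-time updates on read-heavy paths.
func openMetaForRead(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_RDONLY|syscall.O_NOATIME, 0)
}
```
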
Petr Tichý
14aef52004 remove Content-MD5 on Range requests (#11611)
This removes the Content-MD5 response header on Range requests in Azure
Gateway mode. The partial content MD5 doesn't match the full object MD5
in metadata.
2021-02-23 19:32:56 -08:00
Harshavardhana
67b20125e4 fix: do not ignore CREDITS file 2021-02-23 12:34:59 -08:00
Andreas Auernhammer
d4b822d697 pkg/etag: add new package for S3 ETag handling (#11577)
This commit adds a new package `etag` for dealing
with S3 ETags.

Even though ETag is often viewed as MD5 checksum of
an object, handling S3 ETags correctly is a surprisingly
complex task. While it is true that the ETag corresponds
to the MD5 for the most basic S3 API operations, there are
many exceptions in case of multipart uploads or encryption.

Worse, some S3 clients expect very specific behavior when
it comes to ETags. For example, some clients expect that the
ETag is a double-quoted string and fail otherwise.
Non-AWS compliant ETag handling has been a source of many bugs
in the past.

Therefore, this commit adds a dedicated `etag` package that provides
functionality for parsing, generating and converting S3 ETags.
Further, this commit removes the ETag computation from the `hash`
package. Instead, the `hash` package (i.e. `hash.Reader`) should
focus only on computing and verifying the content-sha256.

One core feature of this commit is to provide a mechanism to
communicate a computed ETag from a low-level `io.Reader` to
a high-level `io.Reader`.

This problem occurs when an S3 server receives a request and
has to compute the ETag of the content. However, the server
may also wrap the initial body with several other `io.Reader`,
e.g. when encrypting or compressing the content:
```
   reader := Encrypt(Compress(ETag(content)))
```
In such a case, the ETag should be accessible by the high-level
`io.Reader`.

The `etag` package provides a mechanism to wrap `io.Reader` implementations
such that the `ETag` can be accessed by a type-check.
This technique is applied to the PUT, COPY and Upload handlers.
2021-02-23 12:31:53 -08:00
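
A minimal sketch of the wrap-and-type-check idea described above, using made-up names (Tagger, tagReader, etagOf); this is not the actual pkg/etag API, just a self-contained illustration of surfacing an ETag through a chain of wrapped readers.

```
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// Tagger is implemented by readers that can report an ETag after the
// stream has been fully consumed.
type Tagger interface {
	ETag() string
}

// tagReader computes the MD5 of everything read through it.
type tagReader struct {
	io.Reader
	h hash.Hash
}

func newTagReader(r io.Reader) *tagReader {
	h := md5.New()
	return &tagReader{Reader: io.TeeReader(r, h), h: h}
}

func (t *tagReader) ETag() string { return hex.EncodeToString(t.h.Sum(nil)) }

// wrapper stands in for an encrypting/compressing reader layered on top.
type wrapper struct {
	io.Reader
	inner io.Reader
}

func (w *wrapper) Unwrap() io.Reader { return w.inner }

// etagOf walks a chain of wrapping readers looking for a Tagger.
func etagOf(r io.Reader) (string, bool) {
	for {
		if t, ok := r.(Tagger); ok {
			return t.ETag(), true
		}
		u, ok := r.(interface{ Unwrap() io.Reader })
		if !ok {
			return "", false
		}
		r = u.Unwrap()
	}
}

func main() {
	base := newTagReader(strings.NewReader("hello"))
	top := &wrapper{Reader: base, inner: base}
	io.Copy(io.Discard, top) // consume the stream (normally this would be stored)
	if etag, ok := etagOf(top); ok {
		fmt.Println(etag) // MD5 of "hello"
	}
}
```
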
Minio Trusted
1b63291ee2 Update yaml files to latest version RELEASE.2021-02-23T20-05-01Z 2021-02-23 20:28:30 +00:00
Harshavardhana
aa7244a9a4 fix: make sure to convert the error properly in HealBucket() (#11610)
server startup code expects the object layer to properly
convert errors into a proper type, so that in situations when
servers are coming up and quorum is not available, servers
wait on each other.
2021-02-23 09:23:11 -08:00
Harshavardhana
2a79ea0332 isServerResolvable its sufficient to check server is reachable (#11609)
using isServerResolvable for expiration can lead to chicken-and-egg
problems; a lock might expire while a server is booting up,
causing perpetual lock expiration.
2021-02-22 16:29:53 -08:00
Ritesh H Shukla
6e5c61d917 Skip printing error if empty for reporting bandwidth (#11606) 2021-02-22 13:41:40 -08:00
Aditya Manthramurthy
02e7de6367 LDAP config: fix substitution variables (#11586)
- In the username search filter and username format variables we support %s for
substituting the username.

- In the group search filter we support %s for the username and %d for the full DN of
the username.
2021-02-22 13:20:36 -08:00
Harshavardhana
cec12f4c76 update sha256-simd to v1.0.0 (#11607) 2021-02-22 13:19:53 -08:00
Harshavardhana
da676ac298 remove network calls for getLocalDisks (#11603) 2021-02-22 13:19:44 -08:00
Harshavardhana
18ec933085 fix: for containers use root-disk detection cleverly (#11593)
the root-disk detection implemented currently had issues where
root disk partitions getting modified might race and provide
incorrect results; to avoid this, let's rely back on
DeviceID and match it instead.

In the case of containers, `/data` is one such extra entity that
needs to be verified for the root disk, due to how the 'overlay'
filesystem works and presents a completely
different 'device' id - using `/data` as another entity
for fallback helps because our containers declare a 'VOLUME'
parameter that allows containers to automatically have a
virtual `/data` that points to the container root path; this
can either be at `/` or `/var/lib/` (on a different partition)
2021-02-22 10:32:21 -08:00
Harshavardhana
c31d2c3fdc fix: CrawlAndGetDataUsage close pipe() before using a new one (#11600)
additionally make sure errors during deserialization close
the reader with the right error type such that the Write() end
actually sees the final error; this avoids a waitGroup usage
and waiting.
2021-02-22 10:04:32 -08:00
Harshavardhana
8778828a03 fix: read metadata in O_DIRECT if configured and supported (#11594)
reduce the page-cache pressure completely by moving
the entire read phase of our operations to O_DIRECT;
primarily this is going to be very useful for chatty
metadata operations such as listing, scanner, ILM, and
healing-like operations, to avoid filling up the page-cache
upon repeated runs.
2021-02-22 01:36:17 -08:00
Sarasa Kisaragi
48b212dd8e Fix HDFS wrong filepath if subpath provided (#11574) 2021-02-20 15:32:18 -08:00
Harshavardhana
be7de911c4 fix: update minio-go to fix an issue with S3 gateway (#11591)
since we have changed our default envs to MINIO_ROOT_USER and
MINIO_ROOT_PASSWORD, which were not supported by the minio-go
credentials package, update minio-go to v7.0.10 for this
support. This also addresses a few bugs where users
had to specify AWS_ACCESS_KEY_ID as well to authenticate
with their S3 backend if they only used MINIO_ROOT_USER.
2021-02-20 11:10:21 -08:00
Harshavardhana
8cad407e0b fix: Bring support for symlink on regular files on NAS (#11383)
fixes #11203
2021-02-20 00:30:12 -08:00
Poorna Krishnamoorthy
85d2187c20 fix: ETag mismatch for large upload in replica (#11587) 2021-02-20 00:22:17 -08:00
Anis Elleuch
98d3f94996 metrics: Add the number of requests in the waiting queue (#11580)
We can use this metric to check if there are too many S3 clients in the
queue, which could explain why some of those S3 clients are timing out.

```
minio_s3_requests_waiting_total{server="127.0.0.1:9000"} 9981
```

If max_requests is 10000 then there is a strong possibility that clients
are timing out because of the queue deadline.
2021-02-20 00:21:55 -08:00
mailsmail
173284903b fix incorrect http range in SelectObjectContentHandler (#11585) 2021-02-19 17:55:28 -08:00
WangYuMu
c70240b893 fix incorrect values in sizing guide (#11583) 2021-02-19 10:05:04 -08:00
Harshavardhana
8ba2136e06 Update yaml files to latest version RELEASE.2021-02-19T04-38-02Z 2021-02-18 21:02:25 -08:00
Poorna Krishnamoorthy
2dce5d9442 fix: delete marker permanent delete replication (#11581) 2021-02-18 16:35:37 -08:00
Anis Elleuch
f28b063091 heal: Use healDeleteDangling global const in self healing (#11579)
A small fix: use the healDeleteDangling constant instead of 'true' in the
self-healing code.
2021-02-18 15:16:20 -08:00
Klaus Post
90abea5b7a check if kafka producer is connected (#11578)
fixes #11576
2021-02-18 11:14:27 -08:00
Klaus Post
c5b2a8441b fix: faster healing when disk is replaced. (#11520) 2021-02-18 11:06:54 -08:00
Harshavardhana
0f5ca83418 move CI to go1.15 and go1.16 (#11570) 2021-02-18 09:42:32 -08:00
Klaus Post
8a6b13c239 Avoid synchronizing usage writes (#11560)
If the periodic `case <-t.C:` save gets held up for a long time it will end up
synchronizing all disk writes for saving the caches.

We add jitter to per-set writes so they don't sync up and don't hold a
lock for the write, since it isn't needed anyway.

If an outage prevents writes for a long while we also add individual 
waits for each disk in case there was a queue.

Furthermore limit the number of buffers kept to 2GiB, since this could get 
huge in large clusters. This will not act as a hard limit but should be enough 
for normal operation.
2021-02-18 00:38:37 -08:00
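
A tiny sketch of adding jitter to a periodic interval so per-set writers drift apart instead of firing in lockstep (illustrative; not the MinIO code).

```
package main

import (
	"math/rand"
	"time"
)

// jittered returns d plus up to 25% extra, so callers using the same base
// interval do not synchronize their writes.
func jittered(d time.Duration) time.Duration {
	if d <= 0 {
		return d
	}
	return d + time.Duration(rand.Int63n(int64(d)/4+1))
}
```
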
Poorna Krishnamoorthy
8e8a792d9d Allow delete marker replication from replica (#11566)
in the case of active-active replication.

This PR also has the following changes:

- add docs on replication design
- fix corner case of completing versioned delete on a delete marker
  when the target is down and `mc rm --vid` is performed repeatedly. Instead
  the version should still be retained in the `PENDING|FAILED` state until
  replication sync completes.
- remove `s3:Replication:OperationCompletedReplication` and
   `s3:Replication:OperationFailedReplication` from ObjectCreated 
  events type
2021-02-18 00:33:51 -08:00
Harshavardhana
95e0acbb26 fix: allow accountInfo with creds with parentUsers (#11568) 2021-02-17 20:57:17 -08:00
Poorna Krishnamoorthy
55037e6e54 lifecycle:Fix args passed to determine expiry header (#11567) 2021-02-17 19:25:19 -08:00
Harshavardhana
289e1d8b2a fix: reduce crawler memory usage by orders of magnitude (#11556)
currently the crawler waits for an entire readdir call to
return before it processes usage, lifecycle, replication
and healing - instead we should pass the applicator all
the way down to avoid building any special stack for all
the contents in a single directory.

This allows for

- no need to remember the entire list of entries per directory
  before applying the required functions
- no need to wait for entire readdir() call to finish before
  applying the required functions
2021-02-17 15:34:42 -08:00
Anis Elleuch
e07918abe3 lifecycle: Fix expiration header in some cases (#11565)
The Expiration header was not correctly computed in the case of noncurrent
versions.

The behavior is fixed in this commit.
2021-02-17 14:51:29 -08:00
Harshavardhana
ffea6fcf09 fix: rename crawler as scanner in config (#11549) 2021-02-17 12:04:11 -08:00
Klaus Post
11b2220696 Don't autoheal if disks are healing (#11558)
Don't spawn automatic healing ops if a disk is healing.
2021-02-17 10:18:12 -08:00
Harshavardhana
aa8450a2a1 fix: parallelize getPoolIdx() for object lookup (#11547) 2021-02-16 19:36:15 -08:00
Harshavardhana
87cce344f6 default to common conditions if conditions not present (#11546)
fixes #11544
2021-02-16 11:56:45 -08:00
Harshavardhana
7d4a2d2b68 fix: multiple pool reads parallelize when possible (#11537) 2021-02-16 02:43:47 -08:00
Minio Trusted
cfc8b92dff Update yaml files to latest version RELEASE.2021-02-14T04-01-33Z 2021-02-14 04:25:52 +00:00
Anis Elleuch
c4e12dc846 fix: in MultiDelete API return MalformedXML upon empty input (#11532)
To follow S3 spec
2021-02-13 09:48:25 -08:00
Harshavardhana
a94a9c37fa fix: support IAM policy handling for wildcard actions (#11530)
This PR fixes

- allow `s3:versionid` as a valid conditional for
  Get,Put,Tags,Object locking APIs
- allow additional headers missing for object APIs
- allow wildcard based action matching
2021-02-12 23:05:09 -08:00
Harshavardhana
79b6a43467 fix: avoid timed value for network calls (#11531)
additionally simplify timedValue to use an RWMutex
to avoid concurrent calls to DiskInfo() getting
serialized; this has an effect on all calls that
use GetDiskInfo() on the same disks,

such as getOnlineDisks and getOnlineDisksWithoutHealing
2021-02-12 18:17:52 -08:00
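
A generic sketch of the idea (names are illustrative, not MinIO's timedValue type): cache an expensive call behind an RWMutex with a TTL so concurrent readers are not serialized on a single mutex.

```
package main

import (
	"sync"
	"time"
)

// cachedValue serves a cached result to concurrent readers and refreshes
// it at most once per TTL under the write lock.
type cachedValue struct {
	mu      sync.RWMutex
	value   string
	updated time.Time

	TTL    time.Duration
	Update func() (string, error)
}

func (c *cachedValue) Get() (string, error) {
	c.mu.RLock()
	if time.Since(c.updated) < c.TTL {
		v := c.value
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	// Re-check: another goroutine may have refreshed while we waited.
	if time.Since(c.updated) < c.TTL {
		return c.value, nil
	}
	v, err := c.Update()
	if err != nil {
		return "", err
	}
	c.value, c.updated = v, time.Now()
	return v, nil
}
```
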
Shireesh Anjal
928de04f7a fix: osinfos incomplete in case of warnings (#11505)
The function used for getting host information
(host.SensorsTemperaturesWithContext) returns warnings in some cases.

Returning with error in such cases means we miss out on the other useful
information already fetched (os info).

If the OS info has been successfully fetched, it should always be
included in the output irrespective of whether the other data (CPU
sensors, users) could be fetched or not.
2021-02-12 17:57:57 -08:00
Poorna Krishnamoorthy
93fd248b52 fix: save ModTime properly in disk cache (#11522)
fix #11414
2021-02-11 19:25:47 -08:00
Harshavardhana
2a7b123895 turn off http2 for TLS setups for now (#11523)
due to lots of issues with x/net/http2, as
well as the bundled h2_bundle.go in the Go
runtime, HTTP/2 should be avoided for now.

https://github.com/golang/go/issues/23559
https://github.com/golang/go/issues/42534
https://github.com/golang/go/issues/43989
https://github.com/golang/go/issues/33425
https://github.com/golang/go/issues/29246

With a collection of such issues present, it
makes sense to remove HTTP/2 support for now
2021-02-11 15:53:04 -08:00
Harshavardhana
b3c56b53fb fix: metacache should only rename entries during cleanup (#11503)
To avoid large delays in metacache cleanup, use rename
instead of recursive delete calls; renames are cheaper.
Move the content to minioMetaTmpBucket and then clean up
this folder once every 24hrs instead.

If the new cache can replace an existing one, we should
let it replace it since that is currently being saved anyway;
this avoids a pile-up of 1000's of metacache entries for the
same listing calls that are not necessary to be stored
on disk.
2021-02-11 10:22:03 -08:00
Minio Trusted
0ef3e359d8 Update yaml files to latest version RELEASE.2021-02-11T08-23-43Z 2021-02-11 08:47:10 +00:00
Poorna Krishnamoorthy
f24d8127ab fix: DeleteMultipleObjectsHandler to process deleted objects correctly (#11515)
DeleteMarkerVersionID, which is returned by the lower layer, should
not be used in the key to look up the ObjectToDelete map
2021-02-10 23:41:41 -08:00
Harshavardhana
7875d472bc avoid notification for non-existent delete objects (#11514)
Skip notifications on objects that might have had
an error during deletion; this also avoids unnecessary
replication attempts on such objects.

Refactor some places to make sure that we have notified
the client before we

- notify
- schedule for replication
- lifecycle etc.
2021-02-10 22:00:42 -08:00
Harshavardhana
711adb9652 remove ipv6 fallbackdelay leave it as default 2021-02-10 17:35:09 -08:00
Poorna Krishnamoorthy
e6b4ea7618 More fixes for delete marker replication (#11504)
continuation of PR#11491 for multiple server pools and
bi-directional replication.

Moving proxying for GET/HEAD to handler level rather than
server pool layer as this was also causing incorrect proxying 
of HEAD.

Also fixing metadata update on CopyObject - minio-go was not passing
source version ID in X-Amz-Copy-Source header
2021-02-10 17:25:04 -08:00
Aditya Manthramurthy
466e95bb59 Return group DN instead of group name in LDAP STS (#11501)
- Additionally, check if the user or their groups has a policy attached during
the STS call.

- Remove the group name attribute configuration value.
2021-02-10 16:52:49 -08:00
Harshavardhana
881f98e511 fix: use getPoolIdx in DeleteObjects() (#11513)
filter out relevant objects for each pool to
avoid calling further delete operations on
subsequent pools where some of these objects
might not exist.

This is mainly useful to avoid situations
during bi-directional bucket replication.
2021-02-10 14:25:43 -08:00
Harshavardhana
cbf4bb62e0 fix: getPoolIdx decouple from top level options (#11512)
top-level options shouldn't be passed down for
GetObjectInfo() while verifying the objects in
different pools, this is to make sure that
we always get the value from the pool where
the object exists.
2021-02-10 11:45:02 -08:00
Anis Elleuch
682482459d Change the default object content-type to binary/octet-stream (#11508) 2021-02-10 08:56:37 -08:00
Krishnan Parthasarathi
b87fae0049 Simplify PutObjReader for plain-text reader usage (#11470)
This change moves away from a unified constructor for plaintext and encrypted
usage. NewPutObjReader is simplified for the plain-text reader use. For
encrypted reader use, WithEncryption should be called on an initialized PutObjReader.

Plaintext:
func NewPutObjReader(rawReader *hash.Reader) *PutObjReader

The hash.Reader is used to provide payload size and md5sum to the downstream
consumers. This is different from the previous version in that there is no need
to pass nil values for unused parameters.

Encrypted:
func WithEncryption(encReader *hash.Reader,
key *crypto.ObjectKey) (*PutObjReader, error)

This method sets up encrypted reader along with the key to seal the md5sum
produced by the plain-text reader (already setup when NewPutObjReader was
called).

Usage:
```
  pReader := NewPutObjReader(rawReader)
  // ... other object handler code goes here

  // Prepare the encrypted hashed reader
  pReader, err = pReader.WithEncryption(encReader, objEncKey)

```
2021-02-10 08:52:50 -08:00
Anis Elleuch
b8b44c879f lifecycle: Remove a single delete marker with noncurrent expiry rule (#11444)
NoncurrentVersionExpiry can remove single delete markers according to
S3 spec:

```
The NoncurrentVersionExpiration action in the same Lifecycle
configuration removes noncurrent objects 30 days after they become
noncurrent. Thus, in this example, all object versions are permanently
removed 90 days after object creation. You will have expired object
delete markers, but Amazon S3 detects and removes the expired object
delete markers for you.
```
2021-02-10 08:51:34 -08:00
Harshavardhana
f53d1de87f fix: missing data on multiple columns reading parquet (#11499)
fixes #11413
2021-02-10 08:49:48 -08:00
Shireesh Anjal
5a18d437ce fix: drive hw info incomplete when smartinfo fails (#11509)
Collection of SMART information doesn't work in certain scenarios e.g.
in a container based setup. In such cases, instead of returning an error
(without any data), we should only set the error on the smartinfo
struct, so that other important drive hw info like device, mountpoint,
etc is retained in the output.
2021-02-10 08:48:14 -08:00
Poorna Krishnamoorthy
93eb549a83 fix: duplicate delete marker attempts in bi-directional replication (#11491) 2021-02-09 15:11:43 -08:00
Harshavardhana
fe3c39b583 use the new errgroup API whereever applicable (#11466)
start using the new errgroup concurrency control
API introduced in #11457
2021-02-09 12:08:25 -08:00
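
MinIO carries its own pkg/sync/errgroup variant; purely as an illustration of the errgroup pattern, here is the equivalent shape with the stock golang.org/x/sync/errgroup package (not the MinIO API).

```
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	results := make([]int, 4)

	// Fan out one goroutine per "disk"; the group collects the first error.
	for i := range results {
		i := i // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			results[i] = i * i // stand-in for a per-disk call
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results)
}
```
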
Harshavardhana
84d400487f fix: accountInfo API to cater for federated setups (#11484)
when MinIO is deployed in a federated setup, use etcd 
based listing of buckets to provide appropriate filtering 
of buckets per user.
2021-02-09 09:53:07 -08:00
Shireesh Anjal
3afa499885 fix: empty buckets/objects nodes in new setup (#11493) 2021-02-09 09:52:38 -08:00
Shireesh Anjal
13d015cf93 Fix make target hotfix (#11492)
The code inside the `hotfix` target is overriding the values set at the
beginning of the Makefile affecting other make targets as well.

For example, running `TAG=mytag make docker` also ends up tagging
the docker image as a hotfix instead of `mytag`.

Using the `eval` function inside the `hotfix` target fixes this.
2021-02-09 09:29:43 -08:00
Krishna Srinivas
876b79b8d8 read-health check endpoint returns success if cluster can serve read requests (#11310) 2021-02-09 01:00:44 -08:00
Ritesh H Shukla
3d74efa6b1 fix: copy object for encrypted objects (#11490) 2021-02-08 19:58:17 -08:00
Harshavardhana
68d299e719 fix: case-insensitive lookups for metadata (#11489)
continuation of #11487, with more changes
2021-02-08 18:12:28 -08:00
Poorna Krishnamoorthy
f9c5636c2d fix: lookup metadata case insensitively (#11487)
while setting replication options
2021-02-08 16:19:05 -08:00
Klaus Post
9b10118d34 Metacache add abs entry limit (#11483)
Add an absolute limit to the number of metacaches for a bucket.

Delete excess caches if they haven't been handed out in an hour.
2021-02-08 11:36:16 -08:00
Harshavardhana
0e3211f4ad fix: server upgrades should have more descriptive error messages (#11476)
during rolling upgrade, provide a more descriptive error
message and discourage rolling upgrade in such situations,
allowing users to take action.

additionally, rename `slashpath -> pathutil` to avoid
a slightly mis-pronounced usage of the `path` package.
2021-02-08 10:15:12 -08:00
Harshavardhana
2e4d9124ad honor region specified for remote targets (#11480)
fixes #11472
2021-02-08 08:54:27 -08:00
Harshavardhana
6fef4c21b9 fix: align atomic variables for 32bit arch (#11475)
fixes #11474
2021-02-08 08:51:12 -08:00
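
For context, 64-bit values used with sync/atomic must be 64-bit aligned on 32-bit architectures, and the usual fix is to place them at the start of the struct. A minimal sketch with made-up field names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// counters is a made-up example struct. On 32-bit platforms, 64-bit words
// passed to sync/atomic must be 64-bit aligned; the usual fix is to place
// them first in the struct (or pad explicitly).
type counters struct {
	bytesIn  uint64 // 64-bit atomics kept at the start of the struct
	bytesOut uint64
	flags    uint32
}

func main() {
	var c counters
	atomic.AddUint64(&c.bytesIn, 1024)
	fmt.Println(atomic.LoadUint64(&c.bytesIn), c.flags)
}
```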
Poorna Krishnamoorthy
8e1bbd989a replication:alloc UserDefined map before use (#11478) 2021-02-07 22:01:10 -08:00
Sarasa Kisaragi
152d7cd95b HDFS support keytab (#11473) 2021-02-07 17:29:47 -08:00
Harshavardhana
74080bf108 update CREDITS file 2021-02-07 00:37:12 -08:00
Minio Trusted
647a209c73 Update yaml files to latest version RELEASE.2021-02-07T01-31-02Z 2021-02-07 01:53:27 +00:00
Harshavardhana
0d057c777a remove restriction for multi pool distribution algo 2021-02-06 16:19:05 -08:00
Anis Elleuch
275f7a63e8 lc: Apply DeleteAction correctly to objects (#11471)
When lifecycle decides to Delete an object and not a version in a
versioned bucket, the code should create a delete marker and not
remove the scanned version.

This commit fixes the issue.
2021-02-06 16:10:33 -08:00
Shireesh Anjal
97fe57bba9 Remove Connections from SysProcess struct (#11373)
The connections info of the processes takes up a huge amount of space,
and is not important for adding any useful health checks. Removing it
will significantly reduce the size of the subnet health report.
2021-02-05 21:32:28 -08:00
Harshavardhana
88c1bb0720 fix: improper ticker usage in goroutines (#11468)
- lock maintenance loop was incorrectly sleeping
  as well as using ticker badly, leading to
  extra expiration routines getting triggered
  that could flood the network.

- multipart upload cleanup should be based on
  timer instead of ticker, to ensure that long
  running jobs don't get triggered twice.

- make sure to get right lockers for object name
2021-02-05 19:23:48 -08:00
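
A minimal sketch of the timer-vs-ticker point using only the standard library (the function and interval here are illustrative, not MinIO's actual cleanup code): re-arming a timer only after the job completes guarantees a long-running job is never triggered twice concurrently.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cleanupLoop runs job once per interval. The timer is re-armed only after
// the job returns, so a slow run can never overlap with the next one,
// unlike a ticker, which keeps firing regardless of how long the job takes.
func cleanupLoop(ctx context.Context, interval time.Duration, job func()) {
	t := time.NewTimer(interval)
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			job()
			t.Reset(interval)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	cleanupLoop(ctx, 100*time.Millisecond, func() { fmt.Println("cleanup pass") })
}
```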
Harshavardhana
1fdafaf72f fix: listing for directory object when delimiter is present (#11463)
When you have a hierarchy of prefixes with directory objects,
the current master would list directory objects as prefixes
when a delimiter is present; this is inconsistent with AWS S3

```
aws s3api list-objects --endpoint-url http://localhost:9000 \
    --profile minio --bucket testbucket-v --prefix new/ --delimiter /
{
    "CommonPrefixes": [
        {
            "Prefix": "new/"
        },
        {
            "Prefix": "new/new/"
        }
    ]
}
```

Instead this PR fixes this to behave like AWS S3

```
aws s3api list-objects --endpoint-url http://localhost:9000 \
      --profile minio --bucket testbucket-v --prefix new/ --delimiter /
{
    "Contents": [
        {
            "Key": "new/",
            "LastModified": "2021-02-05T06:27:42.660Z",
            "ETag": "\"d41d8cd98f00b204e9800998ecf8427e\"",
            "Size": 0,
            "StorageClass": "STANDARD",
            "Owner": {
                "DisplayName": "",
                "ID": "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4"
            }
        }
    ],
    "CommonPrefixes": [
        {
            "Prefix": "new/new/"
        }
    ]
}
```
2021-02-05 16:24:40 -08:00
Ritesh H Shukla
5fe4bb6b36 Reduce redundant crawler logging (#11448) 2021-02-05 15:51:11 -08:00
Harshavardhana
99b733d44c fix: deletion of delete marker regression (#11465)
fixes #11440
fixes #11451
fixes #11454
2021-02-05 15:06:23 -08:00
Klaus Post
b4ac05523b Add parallel bucket healing during startup (#11457)
Replaces #11449

Does concurrent healing but limits concurrency to 50 buckets.

Aborts on first error.

`errgroup.Group` is extended to facilitate this in a generic way.
2021-02-05 13:04:26 -08:00
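
A rough sketch of bounded-concurrency healing with abort-on-first-error, using only the standard library rather than MinIO's extended errgroup; names like healBuckets and the bucket list are hypothetical.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// healBuckets heals buckets concurrently with at most maxParallel in flight
// and aborts scheduling after the first error.
func healBuckets(ctx context.Context, buckets []string, maxParallel int,
	heal func(context.Context, string) error) error {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	sem := make(chan struct{}, maxParallel)
	var (
		wg       sync.WaitGroup
		once     sync.Once
		firstErr error
	)
	for _, b := range buckets {
		if ctx.Err() != nil {
			break // a previous bucket failed; stop scheduling new ones
		}
		sem <- struct{}{} // acquire a concurrency slot
		wg.Add(1)
		go func(bucket string) {
			defer wg.Done()
			defer func() { <-sem }()
			if err := heal(ctx, bucket); err != nil {
				once.Do(func() { firstErr = err; cancel() })
			}
		}(b)
	}
	wg.Wait()
	return firstErr
}

func main() {
	err := healBuckets(context.Background(), []string{"b1", "b2", "b3"}, 50,
		func(ctx context.Context, bucket string) error {
			if bucket == "b3" {
				return errors.New("heal failed")
			}
			fmt.Println("healed", bucket)
			return nil
		})
	fmt.Println("first error:", err)
}
```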
Anis Elleuch
c7eacba41c health-info: Add tags to errors (#11412)
We use multiple libraries in health info, but the returned error does
not indicate exactly what library call is failing, hence adding named
tags to returned errors whenever applicable.
2021-02-05 12:37:15 -08:00
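
The tagging itself can be as simple as wrapping the error with the subsystem name; a minimal illustration (the tag and error values are hypothetical), where %w keeps the original error available to errors.Is/As:

```go
package main

import (
	"errors"
	"fmt"
)

var errProcRead = errors.New("open /proc/stat: permission denied")

// tagged wraps an error with the name of the library or subsystem that
// produced it, so the aggregated health report shows exactly what failed.
func tagged(tag string, err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s: %w", tag, err)
}

func main() {
	err := tagged("gopsutil", errProcRead)
	fmt.Println(err)                         // gopsutil: open /proc/stat: permission denied
	fmt.Println(errors.Is(err, errProcRead)) // true; %w keeps the chain intact
}
```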
Anis Elleuch
1887c25279 xl: Fix feeding NumVersions & SuccessorModTime to lifecycle (#11462)
After the recent refactor where lifecycle started to rely on ObjectInfo to
make decisions, it turned out there were some issues calculating
SuccessorModTime and NumVersions, hence lifecycle was not working as
expected in a versioned bucket in some cases.

This commit fixes the behavior.
2021-02-05 11:59:08 -08:00
Harshavardhana
c9b0f595b9 support directory objects in listing in certain scenarios (#11452)
When a directory object is presented as the `prefix`
param, our implementation tends to list only the objects
under the `prefix` rather than the `prefix` itself. To
mimic AWS S3's flat-key behavior, this PR ensures that
if the `prefix` is a directory object, it is automatically
considered part of the eventual listing result.

fixes #11370
2021-02-05 10:12:25 -08:00
Harshavardhana
8bb580abfc fix: use getObjectNInfo to avoid bytes.Buffer usage (#11428)
A few places were still using the legacy GetObject() call,
which was mainly designed for the client response writer;
use GetObjectNInfo() for internal calls instead.
2021-02-05 09:57:30 -08:00
Harshavardhana
af9cb5f5f2 remove deprecated StandardSCData 2021-02-05 01:34:23 -08:00
Harshavardhana
9497dfd804 docs: add deprecation notice for federation 2021-02-04 17:18:37 -08:00
Harshavardhana
da55a05587 fix aggressive expiration detection (#11446)
for some flaky networks this may be too fast a value;
choose a defensive value, and let this be addressed
properly in a new refactor of dsync with renewal logic.

Also enable a faster fallback delay to cater for misconfigured
IPv6 servers

refer
 - https://golang.org/pkg/net/#Dialer
 - https://tools.ietf.org/html/rfc6555
2021-02-04 16:56:40 -08:00
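
For reference, the Happy Eyeballs fallback delay referred to above is exposed on net.Dialer. A minimal sketch, where the 150ms value and the target address are placeholders rather than the values chosen by this commit:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// FallbackDelay controls how long the dialer waits for the preferred
	// address family (often IPv6) before racing the IPv4 fallback, per
	// RFC 6555 "Happy Eyeballs". Both durations here are placeholders.
	d := net.Dialer{
		Timeout:       15 * time.Second,
		FallbackDelay: 150 * time.Millisecond,
	}
	conn, err := d.DialContext(context.Background(), "tcp", "example.com:443")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```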
Harshavardhana
3fc4d6f620 update dependencies for relevant projects (#11445)
- minio-go -> v7.0.8
- ldap/v3 -> v3.2.4
- reedsolomon -> v1.9.11
- sio-go -> v0.3.1
- msgp -> v1.1.5
- simdjson-go, md5-simd, highwayhash
2021-02-04 13:49:52 -08:00
Ritesh H Shukla
67a8f37df0 fix: disk usage capacity metric reporting (#11435) 2021-02-04 12:26:58 -08:00
Anis Elleuch
075c429021 lifecycle: Add more validation to the config (#11382) 2021-02-04 11:26:02 -08:00
ArthurMa
df0c678167 fix: ldap config parsing issue for UserDNSearchFilter (#11437) 2021-02-04 11:07:29 -08:00
Harshavardhana
f108873c48 fix: replication metadata comparison and other fixes (#11410)
- using miniogo.ObjectInfo.UserMetadata is not correct
- using UserTags from Map->String() can change order
- ContentType comparison needs to be removed.
- Compare both lowercase and uppercase key names.
- do not silently error out constructing PutObjectOptions
  if tag parsing fails
- avoid notification for empty object info, failed operations
  should rely on valid objInfo for notification in all
  situations
- optimize copyObject implementation, also introduce a new 
  replication event
- clone ObjectInfo() before scheduling for replication
- add additional headers for comparison
- remove strings.EqualFold comparison avoid unexpected bugs
- fix pool based proxying with multiple pools
- compare only specific metadata

Co-authored-by: Poorna Krishnamoorthy <poornas@users.noreply.github.com>
2021-02-03 20:41:33 -08:00
Andreas Auernhammer
871b450dbd crypto: add support for decrypting SSE-KMS metadata (#11415)
This commit refactors the SSE implementation and adds
S3-compatible SSE-KMS context handling.

SSE-KMS differs from SSE-S3 in two main aspects:
 1. The client can request a particular key and
    specify a KMS context as part of the request.
 2. The ETag of an SSE-KMS encrypted object is not
    the MD5 sum of the object content.

This commit only focuses on the 1st aspect.

A client can send an optional SSE context when using
SSE-KMS. This context is remembered by the S3 server
such that the client does not have to specify the
context again (during multipart PUT / GET / HEAD ...).
The crypto context also includes the bucket/object
name to prevent renaming objects at the backend.

Now, AWS S3 behaves as follows:
 - If the user does not provide an SSE-KMS context,
   it does not store one and does not include
   the SSE-KMS context header in the response (e.g. HEAD).
 - If the user specifies a SSE-KMS context without
   the bucket/object name then AWS stores the exact
   context the client provided but adds the bucket/object
   name internally. The response contains the KMS context
   without the bucket/object name.
 - If the user specifies a SSE-KMS context with
   the bucket/object name then AWS again stores the exact
   context provided by the client. The response contains
   the KMS context with the bucket/object name.

This commit implements this behavior w.r.t. SSE-KMS.
However, as of now, no such object can be created since
the server rejects SSE-KMS encryption requests.

This commit is one stepping stone for SSE-KMS support.

Co-authored-by: Harshavardhana <harsha@minio.io>
2021-02-03 15:19:08 -08:00
Harshavardhana
f71e192343 avoid listing an empty dir without __XLDIR__ (#11427)
```
minio server /tmp/disk{1...4}
mc mb myminio/testbucket/
mkdir -p /tmp/disk{1..4}/testbucket/test-prefix/
```

This would end up being listed in the current
master; this PR fixes this situation.

If a directory is a leaf dir we should avoid it
being listed, since it cannot be deleted anymore
with DeleteObject, DeleteObjects() API calls
because we natively support directories now.

Avoid listing it and let healing purge this folder
eventually in the background.
2021-02-03 14:06:54 -08:00
Anis Elleuch
b3f81e75f6 xl: Make it clear when to create a delete marker for a non-existent object (#11423) 2021-02-03 10:33:43 -08:00
Klaus Post
a71e0483c9 Fix nil disks in getOnlineDisksWithHealing (#11419)
If a disk is skipped because it is nil, it is still returned.
2021-02-02 17:04:37 -08:00
Bahram Aghaei
f2d49ec21a Update ldap.md: add a link to ldap.go (#11409) 2021-02-02 15:47:04 -08:00
Klaus Post
4a9d9c8585 Update colinmarc/hdfs (#11417)
Updates needed dependency as well.

Fixes #11416
2021-02-02 15:37:30 -08:00
Harshavardhana
c885777ac6 Add support for TCP_QUICKACK (#11369)
TCP_QUICKACK is a setting that allows TCP endpoints
to acknowledge the receipt of data instantly in situations
where they would normally wait to see if more data
would be arriving.

https://assets.extrahop.com/whitepapers/TCP-Optimization-Guide-by-ExtraHop.pdf
2021-02-02 09:44:18 -08:00
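
A Linux-only sketch of how TCP_QUICKACK can be set on a dialed or accepted connection via golang.org/x/sys/unix; this illustrates the socket option itself and is not a copy of MinIO's listener code.

```go
package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

// setTCPQuickACK enables TCP_QUICKACK on a TCP connection (Linux only), so
// ACKs are sent immediately instead of being delayed.
func setTCPQuickACK(c *net.TCPConn) error {
	raw, err := c.SyscallConn()
	if err != nil {
		return err
	}
	var sockErr error
	if err := raw.Control(func(fd uintptr) {
		sockErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_TCP, unix.TCP_QUICKACK, 1)
	}); err != nil {
		return err
	}
	return sockErr
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if tc, ok := conn.(*net.TCPConn); ok {
		fmt.Println("TCP_QUICKACK:", setTCPQuickACK(tc))
	}
}
```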
Poorna Krishnamoorthy
fe3aca70c3 Make number of replication workers configurable. (#11379)
The MINIO_API_REPLICATION_WORKERS env var and
`mc admin config set api` allow the number of replication
workers to be configured. Defaults to half the number
of CPUs available.

Co-authored-by: Poorna Krishnamoorthy <poorna@minio.io>
2021-02-02 16:45:06 +05:30
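
A minimal sketch of the default computation; only the env var name comes from the commit above, while the parsing and clamping details are assumptions for illustration.

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
)

// replicationWorkers returns the configured worker count, defaulting to
// half the available CPUs (at least 1) when the env var is unset or invalid.
func replicationWorkers() int {
	def := runtime.NumCPU() / 2
	if def < 1 {
		def = 1
	}
	v := os.Getenv("MINIO_API_REPLICATION_WORKERS")
	if v == "" {
		return def
	}
	n, err := strconv.Atoi(v)
	if err != nil || n < 1 {
		return def
	}
	return n
}

func main() {
	fmt.Println("replication workers:", replicationWorkers())
}
```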
Ritesh H Shukla
c4848f9b4f Add process start time to cluster metrics. (#11405) 2021-02-01 23:02:18 -08:00
Andreas Auernhammer
838d4dafbd gateway: don't use encrypted ETags for If-Match (#11400)
This commit fixes a bug in the S3 gateway that causes
GET requests to fail when the object is encrypted by the
gateway itself.

The gateway was not able to GET the object since it always
specified an `If-Match` pre-condition checking that the object
ETag matches an expected ETag - even for encrypted ETags.

The problem is that an encrypted ETag will never match the ETag
computed by the backend causing the `If-Match` pre-condition
to fail.

This commit fixes this by not sending an `If-Match` header when
the ETag is encrypted. This is acceptable because:
  1. A gateway-encrypted object consists of two objects at the backend
     and there is no way to provide a concurrency-safe implementation
     of two consecutive S3 GETs in the deployment model of the S3
     gateway.
     Ref: S3 gateways are self-contained and isolated - and there may
          be multiple instances at the same time (no lock across
          instances).
  2. Even if the data object changes (concurrent PUT) while gateway
     A has downloaded the metadata object (but not issued the GET to
     the data object => data race) then we don't return invalid data
     to the client since the decryption (of the currently uploaded data)
     will fail - given the metadata of the previous object.
2021-02-01 23:02:08 -08:00
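
A rough sketch of the idea, with a hypothetical helper: attach If-Match only when the ETag looks like a plain single-part MD5 ETag, and skip the pre-condition for gateway-encrypted (opaque) ETags.

```go
package main

import (
	"fmt"
	"net/http"
	"regexp"
)

// plainMD5ETag reports whether an ETag looks like an unencrypted single-part
// MD5 ETag (32 hex characters, optionally quoted). Gateway-encrypted ETags
// are opaque, so an If-Match against the backend would always fail for them.
var plainMD5ETag = regexp.MustCompile(`^"?[a-fA-F0-9]{32}"?$`).MatchString

// addConditionalGet is a hypothetical helper illustrating the fix: skip the
// If-Match pre-condition entirely when the ETag is not a plain MD5.
func addConditionalGet(req *http.Request, etag string) {
	if plainMD5ETag(etag) {
		req.Header.Set("If-Match", etag)
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://backend.example/bucket/object", nil)
	addConditionalGet(req, `"d41d8cd98f00b204e9800998ecf8427e"`)
	fmt.Printf("If-Match: %q\n", req.Header.Get("If-Match"))
}
```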
swartz-k
8c663f93f7 fix: typo in chinese docs (#11401) 2021-02-01 18:42:58 -08:00
Minio Trusted
b4cb7edf85 Update yaml files to latest version RELEASE.2021-02-01T22-56-52Z 2021-02-01 23:28:23 +00:00
373 changed files with 48831 additions and 7972 deletions

View File

@@ -1,2 +1,9 @@
.git
.github
docs
default.etcd
browser
*.gz
*.tar.gz
*.bzip2
*.zip

View File

@@ -11,8 +11,8 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.14.x, 1.15.x]
os: [ubuntu-latest, windows-latest]
go-version: [1.16.x]
os: [ubuntu-latest, windows-latest, macos-latest]
steps:
- uses: actions/checkout@v2
- uses: actions/setup-node@v1
@@ -21,6 +21,14 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Build on ${{ matrix.os }}
if: matrix.os == 'macos-latest'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
make
make test-race
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
env:
@@ -42,8 +50,6 @@ jobs:
curl -L -o nancy https://github.com/sonatype-nexus-community/nancy/releases/download/${nancy_version}/nancy-${nancy_version}-linux-amd64 && chmod +x nancy
go list -m all | ./nancy sleuth
make
diff -au <(gofmt -s -d cmd) <(printf "")
diff -au <(gofmt -s -d pkg) <(printf "")
make test-race
make crosscompile
make verify

View File

@@ -17,6 +17,8 @@ linters:
- gosimple
- deadcode
- structcheck
- gomodguard
- gofmt
issues:
exclude-use-default: false

2239
CREDITS

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
FROM golang:1.15-alpine as builder
FROM golang:1.16-alpine as builder
LABEL maintainer="MinIO Inc <dev@min.io>"

View File

@@ -1,4 +1,4 @@
FROM golang:1.15-alpine as builder
FROM golang:1.16-alpine as builder
LABEL maintainer="MinIO Inc <dev@min.io>"

View File

@@ -6,8 +6,6 @@ LABEL maintainer="MinIO Inc <dev@min.io>"
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
COPY minio /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
ENV MINIO_UPDATE=off \
MINIO_ACCESS_KEY_FILE=access_key \
@@ -17,10 +15,9 @@ ENV MINIO_UPDATE=off \
MINIO_KMS_MASTER_KEY_FILE=kms_master_key \
MINIO_SSE_MASTER_KEY_FILE=sse_master_key
RUN \
microdnf update --nodocs && \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
microdnf clean all && \
RUN microdnf update --nodocs
RUN microdnf install curl ca-certificates shadow-utils util-linux --nodocs
RUN microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh

View File

@@ -2,11 +2,13 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
ARG TARGETARCH
ARG RELEASE
LABEL name="MinIO" \
vendor="MinIO Inc <dev@min.io>" \
maintainer="MinIO Inc <dev@min.io>" \
version="RELEASE.2021-01-30T00-20-58Z" \
release="RELEASE.2021-01-30T00-20-58Z" \
version="${RELEASE}" \
release="${RELEASE}" \
summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \
description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads."
@@ -28,9 +30,9 @@ RUN \
microdnf install curl ca-certificates shadow-utils util-linux --nodocs && \
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
microdnf install minisign --nodocs && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/minio.minisig -o /usr/bin/minio.minisig && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /usr/bin/minio && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /usr/bin/minio.sha256sum && \
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /usr/bin/minio.minisig && \
microdnf clean all && \
chmod +x /usr/bin/minio && \
chmod +x /usr/bin/docker-entrypoint.sh && \

View File

@@ -17,38 +17,28 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && go get github.com/quasilyte/go-ruleguard/cmd/ruleguard@v0.2.1)
@which msgp 1>/dev/null || (echo "Installing msgp" && go get github.com/tinylib/msgp@v1.1.3)
@which stringer 1>/dev/null || (echo "Installing stringer" && go get golang.org/x/tools/cmd/stringer)
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps fmt lint ruleguard check-gen
verifiers: getdeps lint check-gen
check-gen:
@go generate ./... >/dev/null
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
fmt:
@echo "Running $@ check"
@GO111MODULE=on gofmt -d cmd/
@GO111MODULE=on gofmt -d pkg/
lint:
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml
ruleguard:
@echo "Running $@ check"
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
@echo "Running unit tests"
@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
@GOGC=25 GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
test-race: verifiers build
@echo "Running unit tests under -race"
@@ -71,18 +61,18 @@ build: checks
@echo "Building minio binary to './minio'"
@GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
hotfix: LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#'))
TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)"
hotfix: install
hotfix-vars:
$(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \
sed 's#RELEASE\.\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)T\([0-9]\+\)-\([0-9]\+\)-\([0-9]\+\)Z#\1-\2-\3T\4:\5:\6Z#')))
$(eval TAG := "minio/minio:$(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)")
hotfix: hotfix-vars install
docker-hotfix: hotfix checks
@echo "Building minio docker image '$(TAG)'"
@docker build -t $(TAG) . -f Dockerfile.dev
docker: checks
docker: build checks
@echo "Building minio docker image '$(TAG)'"
@GOOS=linux GO111MODULE=on CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@docker build -t $(TAG) . -f Dockerfile.dev
# Builds minio and installs it to $GOPATH/bin.

100
README.md
View File

@@ -5,32 +5,32 @@
MinIO is a High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads.
This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
This README provides quickstart instructions on running MinIO on baremetal hardware, including Docker-based installations. For Kubernetes environments,
use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
# Docker Installation
Use the following commands to run a standalone MinIO server on a Docker container.
Use the following commands to run a standalone MinIO server on a Docker container.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Stable
Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:
Run the following command to run the latest stable image of MinIO on a Docker container using an ephemeral data volume:
```sh
docker run -p 9000:9000 minio/minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
@@ -45,12 +45,12 @@ Run the following command to run the bleeding-edge image of MinIO on a Docker co
docker run -p 9000:9000 minio/minio:edge server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
@@ -61,9 +61,9 @@ see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view
Use the following commands to run a standalone MinIO server on macOS.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
## Homebrew (recommended)
@@ -82,12 +82,12 @@ brew uninstall minio
brew install minio/stable/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
## Binary Download
@@ -100,12 +100,12 @@ chmod +x minio
./minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
@@ -130,18 +130,18 @@ The following table lists supported architectures. Replace the `wget` URL with t
| 64-bit PowerPC LE (ppc64le) | https://dl.min.io/server/minio/release/linux-ppc64le/minio |
| IBM Z-Series (S390X) | https://dl.min.io/server/minio/release/linux-s390x/minio |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# Microsoft Windows
@@ -158,17 +158,17 @@ Use the following command to run a standalone MinIO server on the Windows host.
minio.exe server D:\
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
# FreeBSD
@@ -184,27 +184,27 @@ service minio start
# Install from Source
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.15](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.16](https://golang.org/dl/#stable)
```sh
GO111MODULE=on go get github.com/minio/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Browser, an embedded
web-based object browser built into MinIO Server. Point a web browser running on the host machine to http://127.0.0.1:9000 and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see https://docs.min.io/docs/ and click **MINIO SDKS** in the navigation to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Quickstart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide.html)
for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
# Deployment Recommendations

View File

@@ -89,7 +89,7 @@ service minio start
## Install from Source
Source installation is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum required Golang version is [go1.14](https://golang.org/dl/#stable)
Source installation is intended only for developers and advanced users. If you do not yet have a Golang environment, please refer to [How to install Golang](https://golang.org/doc/install). The minimum required Golang version is [go1.16](https://golang.org/dl/#stable)
```sh
GO111MODULE=on go get github.com/minio/minio

1
browser/.gitignore vendored
View File

@@ -17,4 +17,3 @@ release
*.syso
coverage.txt
node_modules
production

View File

@@ -17,24 +17,13 @@ nvm install stable
npm install
```
### Install `go-bindata` and `go-bindata-assetfs`
If you do not have a working Golang environment, please follow [Install Golang](https://golang.org/doc/install)
```sh
go get github.com/go-bindata/go-bindata/go-bindata
go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
```
## Generating Assets
### Generate ui-assets.go
```sh
npm run release
```
This generates ui-assets.go in the current directory. Now do `make` in the parent directory to build the minio binary with the newly generated ``ui-assets.go``
This generates `production` in the current directory.
## Run MinIO Browser with live reload

View File

@@ -24,9 +24,6 @@ jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))
jest.mock("../../web", () => ({
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (

View File

@@ -26,6 +26,7 @@ import {
SHARE_OBJECT_EXPIRY_HOURS,
SHARE_OBJECT_EXPIRY_MINUTES
} from "../constants"
import QRCode from "react-qr-code";
export class ShareObjectModal extends React.Component {
constructor(props) {
@@ -89,6 +90,7 @@ export class ShareObjectModal extends React.Component {
<ModalHeader>Share Object</ModalHeader>
<ModalBody>
<div className="input-group copy-text">
<QRCode value={url} size={128}/>
<label>Shareable Link</label>
<input
type="text"

View File

@@ -75,6 +75,11 @@
border-color: darken(@input-border, 5%);
}
}
svg {
display: block;
margin: 0 auto 5px;
}
}
/*--------------------------
@@ -150,4 +155,4 @@
100% {
transform: rotate(360deg);
}
}
}

11
browser/assets.go Normal file
View File

@@ -0,0 +1,11 @@
package browser
import "embed"
//go:embed production/*
var fs embed.FS
// GetStaticAssets returns assets
func GetStaticAssets() embed.FS {
return fs
}

View File

@@ -14,19 +14,11 @@
* limitations under the License.
*/
var moment = require('moment')
var async = require('async')
var exec = require('child_process').exec
var fs = require('fs')
var isProduction = process.env.NODE_ENV == 'production' ? true : false
var assetsFileName = ''
var commitId = ''
var date = moment.utc()
var version = date.format('YYYY-MM-DDTHH:mm:ss') + 'Z'
var releaseTag = date.format('YYYY-MM-DDTHH-mm-ss') + 'Z'
var buildType = 'DEVELOPMENT'
if (process.env.MINIO_UI_BUILD) buildType = process.env.MINIO_UI_BUILD
rmDir = function(dirPath) {
try { var files = fs.readdirSync(dirPath); }
@@ -53,74 +45,6 @@ async.waterfall([
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (isProduction) {
fs.renameSync('production/index_bundle.js',
'production/index_bundle-' + releaseTag + '.js')
} else {
fs.renameSync('dev/index_bundle.js',
'dev/index_bundle-' + releaseTag + '.js')
}
var cmd = 'git log --format="%H" -n1'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
if (!stdout) throw new Error('commitId is empty')
commitId = stdout.replace('\n', '')
if (commitId.length !== 40) throw new Error('commitId invalid : ' + commitId)
assetsFileName = 'ui-assets.go';
var cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true production/...'
if (!isProduction) {
cmd = 'go-bindata-assetfs -o bindata_assetfs.go -pkg browser -nocompress=true dev/...'
}
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
var cmd = 'gofmt -s -w -l bindata_assetfs.go'
console.log('Running', cmd)
exec(cmd, cb)
},
function(stdout, stderr, cb) {
fs.renameSync('bindata_assetfs.go', assetsFileName)
fs.appendFileSync(assetsFileName, '\n')
fs.appendFileSync(assetsFileName, 'var UIReleaseTag = "' + buildType + '.' +
releaseTag + '"\n')
fs.appendFileSync(assetsFileName, 'var UICommitID = "' + commitId + '"\n')
fs.appendFileSync(assetsFileName, 'var UIVersion = "' + version + '"')
fs.appendFileSync(assetsFileName, '\n')
var contents;
if (isProduction) {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_productionIndexHtml/g, '_productionIndexHTML')
.replace(/productionIndexHtmlBytes/g, 'productionIndexHTMLBytes')
.replace(/productionIndexHtml/g, 'productionIndexHTML')
.replace(/_productionIndex_bundleJs/g, '_productionIndexBundleJs')
.replace(/productionIndex_bundleJsBytes/g, 'productionIndexBundleJsBytes')
.replace(/productionIndex_bundleJs/g, 'productionIndexBundleJs')
.replace(/_productionJqueryUiMinJs/g, '_productionJqueryUIMinJs')
.replace(/productionJqueryUiMinJsBytes/g, 'productionJqueryUIMinJsBytes')
.replace(/productionJqueryUiMinJs/g, 'productionJqueryUIMinJs');
} else {
contents = fs.readFileSync(assetsFileName, 'utf8')
.replace(/_devIndexHtml/g, '_devIndexHTML')
.replace(/devIndexHtmlBytes/g, 'devIndexHTMLBytes')
.replace(/devIndexHtml/g, 'devIndexHTML')
.replace(/_devIndex_bundleJs/g, '_devIndexBundleJs')
.replace(/devIndex_bundleJsBytes/g, 'devIndexBundleJsBytes')
.replace(/devIndex_bundleJs/g, 'devIndexBundleJs')
.replace(/_devJqueryUiMinJs/g, '_devJqueryUIMinJs')
.replace(/devJqueryUiMinJsBytes/g, 'devJqueryUIMinJsBytes')
.replace(/devJqueryUiMinJs/g, 'devJqueryUIMinJs');
}
contents = contents.replace(/MINIO_UI_VERSION/g, version)
contents = contents.replace(/index_bundle.js/g, 'index_bundle-' + releaseTag + '.js')
fs.writeFileSync(assetsFileName, contents, 'utf8')
console.log('UI assets file :', assetsFileName)
cb()
}
], function(err) {
if (err) return console.log(err)
})

32008
browser/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -6,7 +6,7 @@
"test": "jest",
"dev": "NODE_ENV=dev webpack-dev-server --devtool cheap-module-eval-source-map --progress --colors --hot --content-base dev",
"build": "NODE_ENV=dev node build.js",
"release": "NODE_ENV=production MINIO_UI_BUILD=RELEASE node build.js",
"release": "NODE_ENV=production node build.js",
"format": "esformatter -i 'app/**/*.js'"
},
"jest": {
@@ -84,6 +84,7 @@
"react-dropzone": "^11.0.1",
"react-infinite-scroller": "^1.2.4",
"react-onclickout": "^2.0.8",
"react-qr-code": "^1.1.1",
"react-redux": "^5.1.2",
"react-router-dom": "^5.2.0",
"redux": "^4.0.5",

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 KiB

View File

@@ -0,0 +1,59 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>MinIO Browser</title>
<link rel="icon" type="image/png" sizes="32x32" href="/minio/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="/minio/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="/minio/favicon-16x16.png">
<link rel="stylesheet" href="/minio/loader.css" type="text/css">
</head>
<body>
<div class="page-load">
<div class="pl-inner">
<img src="/minio/logo.svg" alt="">
</div>
</div>
<div id="root"></div>
<!--[if lt IE 11]>
<div class="ie-warning">
<div class="iw-inner">
<i class="iwi-icon fas fa-exclamation-triangle"></i>
You are using Internet Explorer version 12.0 or lower. Due to security issues and lack of support for Web Standards it is highly recommended that you upgrade to a modern browser
<ul>
<li>
<a href="http://www.google.com/chrome/">
<img src="chrome.png" alt="">
<div>Chrome</div>
</a>
</li>
<li>
<a href="https://www.mozilla.org/en-US/firefox/new/">
<img src="firefox.png" alt="">
<div>Firefox</div>
</a>
</li>
<li>
<a href="https://www.apple.com/safari/">
<img src="safari.png" alt="">
<div>Safari</div>
</a>
</li>
</ul>
<div class="iwi-skip">Skip & Continue</div>
</div>
</div>
<![endif]-->
<script>currentUiVersion = 'MINIO_UI_VERSION'</script>
<script src="/minio/index_bundle.js"></script>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,98 @@
.page-load {
position: fixed;
width: 100%;
height: 100%;
top: 0;
left: 0;
background: #002a37;
z-index: 100;
transition: opacity 200ms;
-webkit-transition: opacity 200ms;
}
.pl-0{
opacity: 0;
}
.pl-1 {
display: none;
}
.pl-inner {
position: absolute;
width: 100px;
height: 100px;
left: 50%;
margin-left: -50px;
top: 50%;
margin-top: -50px;
text-align: center;
-webkit-animation: fade-in 500ms;
animation: fade-in 500ms;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
animation-delay: 350ms;
-webkit-animation-delay: 350ms;
-webkit-backface-visibility: visible;
backface-visibility: visible;
}
.pl-inner:before {
content: '';
position: absolute;
width: 100%;
height: 100%;
left: 0;
top: 0;
display: block;
-webkit-animation: spin 1000ms infinite linear;
animation: spin 1000ms infinite linear;
border: 1px solid rgba(255, 255, 255, 0.2);;
border-left-color: #fff;
border-radius: 50%;
}
.pl-inner > img {
width: 30px;
margin-top: 21px;
}
@-webkit-keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@keyframes fade-in {
0% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@-webkit-keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
@keyframes spin {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}

View File

@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="93px" height="187px" viewBox="0 0 93 187" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generator: Sketch 48.2 (47327) - http://www.bohemiancoding.com/sketch -->
<title>logo</title>
<desc>Created with Sketch.</desc>
<defs></defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="logo" transform="translate(0.187500, -0.683594)" fill="#FFFFFF" fill-rule="nonzero">
<path d="M91.49,46.551 C86.7827023,38.7699609 82.062696,30.9966172 77.33,23.231 C74.87,19.231 72.33,15.231 69.88,11.231 C69.57,10.731 69.18,10.291 68.88,9.831 C64.35,2.931 55.44,-1.679 46.73,2.701 C42.9729806,4.51194908 40.0995718,7.75449451 38.7536428,11.7020516 C37.4077139,15.6496086 37.701799,19.9721186 39.57,23.701 C41.08,26.641 43.57,29.121 45.91,31.581 C53.03,39.141 60.38,46.491 67.45,54.111 C72.4175495,59.4492221 74.4526451,66.8835066 72.8965704,74.0075359 C71.3404956,81.1315653 66.390952,87.0402215 59.65,89.821 C59.4938176,89.83842 59.3361824,89.83842 59.18,89.821 L59.18,54.591 C46.6388051,61.0478363 35.3944735,69.759905 26.01,80.291 C11.32,96.671 2.64,117.141 0.01,132.071 L23.96,119.821 C31.96,115.771 39.86,111.821 48.14,107.581 L48.14,175.921 L59.14,187.131 L59.14,101.831 C59.14,101.831 59.39,101.711 60.22,101.261 C63.5480598,99.6738911 66.7772674,97.8873078 69.89,95.911 C77.7130888,90.4306687 82.7479457,81.8029342 83.6709542,72.295947 C84.5939627,62.7889599 81.3127806,53.3538429 74.69,46.471 C66.49,37.891 58.24,29.351 50.05,20.761 C47.67,18.261 47.72,15.101 50.05,12.881 C52.38,10.661 55.56,10.881 57.96,13.331 L61.38,16.781 C64.1,19.681 66.79,22.611 69.53,25.481 C76.4547149,32.7389629 83.3947303,39.9823123 90.35,47.211 C90.7,47.571 91.12,47.871 91.5,48.211 L91.93,47.951 C91.8351945,47.4695902 91.6876376,47.0000911 91.49,46.551 Z M48.11,94.931 C47.9883217,95.5022568 47.6230065,95.9917791 47.11,96.271 C42.72,98.601 38.29,100.871 33.87,103.141 L17.76,111.401 C24.771203,96.7435071 35.1132853,83.9289138 47.96,73.981 C48.08,74.221 48.16,74.301 48.16,74.381 C48.15,81.231 48.17,88.081 48.11,94.931 Z" id="Shape"></path>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.9 KiB

File diff suppressed because one or more lines are too long

View File

@@ -21,7 +21,7 @@ _init() {
## Minimum required versions for build dependencies
GIT_VERSION="1.0"
GO_VERSION="1.13"
GO_VERSION="1.16"
OSX_VERSION="10.8"
KNAME=$(uname -s)
ARCH=$(uname -m)

View File

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test coss compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips"
}
function _build() {

View File

@@ -3,5 +3,5 @@
set -e
for d in $(go list ./... | grep -v browser); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
CGO_ENABLED=1 go test -v -tags kqueue -race --timeout 100m "$d"
done

View File

@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

View File

@@ -28,6 +28,8 @@ WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server )
export GOGC=25
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123

View File

@@ -20,7 +20,6 @@ import (
"encoding/xml"
"io"
"net/http"
"net/url"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
@@ -180,7 +179,7 @@ func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -244,7 +243,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return

View File

@@ -172,7 +172,12 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
}
if err = globalBucketTargetSys.SetTarget(ctx, bucket, &target, update); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
switch err.(type) {
case BucketRemoteConnectionErr:
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrReplicationRemoteConnectionError, err), r.URL)
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)

View File

@@ -19,12 +19,15 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@@ -66,7 +69,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, err := globalIAMSys.IsTempUser(accessKey)
ok, _, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -155,6 +158,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
if !implicitPerm {
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.GetUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
@@ -395,6 +399,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: parentUser,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", parentUser, claims),
IsOwner: owner,
@@ -405,6 +410,19 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
}
}
if implicitPerm && !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
Groups: cred.Groups,
Action: iampolicy.CreateUserAdminAction,
ConditionValues: getConditionValues(r, "", accessKey, claims),
IsOwner: owner,
Claims: claims,
DenyOnly: true, // check if changing password is explicitly denied.
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@@ -483,7 +501,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, cred.Groups, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -663,6 +681,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -675,6 +694,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -688,12 +708,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
return rd, wr
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
@@ -701,12 +715,47 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
logger.LogIf(ctx, err)
}
accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
// If etcd, dns federation configured list buckets from etcd.
var buckets []BucketInfo
if globalDNSConfig != nil && globalBucketFederation {
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err,
dns.ErrNoEntriesFound,
dns.ErrDomainMissing) {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for _, dnsRecords := range dnsBuckets {
buckets = append(buckets, BucketInfo{
Name: dnsRecords[0].Key,
Created: dnsRecords[0].CreationDate,
})
}
sort.Slice(buckets, func(i, j int) bool {
return buckets[i].Name < buckets[j].Name
})
} else {
buckets, err = objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
policies, err := globalIAMSys.PolicyDBGet(accountName, false)
accountName := cred.AccessKey
var policies []string
switch globalIAMSys.usersSysType {
case MinIOUsersSysType:
policies, err = globalIAMSys.PolicyDBGet(accountName, false)
case LDAPUsersSysType:
parentUser := accountName
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
policies, err = globalIAMSys.PolicyDBGet(parentUser, false, cred.Groups...)
default:
err = errors.New("should not happen!")
}
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -963,7 +1012,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
ok, _, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return

View File

@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"os"
@@ -259,9 +260,11 @@ type ServerHTTPAPIStats struct {
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
S3RequestsInQueue int32 `json:"s3RequestsInQueue"`
CurrentS3Requests ServerHTTPAPIStats `json:"currentS3Requests"`
TotalS3Requests ServerHTTPAPIStats `json:"totalS3Requests"`
TotalS3Errors ServerHTTPAPIStats `json:"totalS3Errors"`
TotalS3Canceled ServerHTTPAPIStats `json:"totalS3Canceled"`
}
// ServerInfoData holds storage, connections and other
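The two fields added to `ServerHTTPStats` above extend what the admin API reports: `S3RequestsInQueue` looks like an instantaneous gauge (presumably requests waiting for an admission slot, e.g. behind the `maxClients` wrapper visible in the router changes further down), and `TotalS3Canceled` appears to count requests whose context the client canceled. The bookkeeping itself is not part of this diff; the sketch below is only a hedged illustration of how an `int32` gauge like this is typically maintained with `sync/atomic`.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// httpStats is a stand-in for the server's stats object; only the queue gauge
// from the diff is modeled here.
type httpStats struct {
	s3RequestsInQueue int32
}

// enqueue/dequeue bracket the time a request spends waiting for admission.
func (st *httpStats) enqueue() { atomic.AddInt32(&st.s3RequestsInQueue, 1) }
func (st *httpStats) dequeue() { atomic.AddInt32(&st.s3RequestsInQueue, -1) }

func main() {
	var st httpStats
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			st.enqueue()
			defer st.dequeue()
			// ... request would be served here ...
		}()
	}
	wg.Wait()
	fmt.Println("in queue:", atomic.LoadInt32(&st.s3RequestsInQueue)) // 0 once all are done
}
```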
@@ -296,7 +299,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
storageInfo, _ := objectAPI.StorageInfo(ctx)
// Collect any disk healing.
healing, _ := getAggregatedBackgroundHealState(ctx)
healing, _ := getAggregatedBackgroundHealState(ctx, nil)
healDisks := make(map[string]struct{}, len(healing.HealDisks))
for _, disk := range healing.HealDisks {
healDisks[disk] = struct{}{}
@@ -860,16 +863,14 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
keepConnLive(w, r, respCh)
}
func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState, error) {
var bgHealStates []madmin.BgHealState
localHealState, ok := getLocalBackgroundHealStatus()
if !ok {
return madmin.BgHealState{}, errServerNotInitialized
}
// getAggregatedBackgroundHealState returns the heal state of disks.
// If no ObjectLayer is provided no set status is returned.
func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
// Get local heal status first
bgHealStates = append(bgHealStates, localHealState)
bgHealStates, ok := getBackgroundHealStatus(ctx, o)
if !ok {
return bgHealStates, errServerNotInitialized
}
if globalIsDistErasure {
// Get heal status from other peers
@@ -884,33 +885,10 @@ func getAggregatedBackgroundHealState(ctx context.Context) (madmin.BgHealState,
if errCount == len(nerrs) {
return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
}
bgHealStates = append(bgHealStates, peersHealStates...)
bgHealStates.Merge(peersHealStates...)
}
// Aggregate healing result
var aggregatedHealStateResult = madmin.BgHealState{
ScannedItemsCount: bgHealStates[0].ScannedItemsCount,
LastHealActivity: bgHealStates[0].LastHealActivity,
NextHealRound: bgHealStates[0].NextHealRound,
HealDisks: bgHealStates[0].HealDisks,
}
bgHealStates = bgHealStates[1:]
for _, state := range bgHealStates {
aggregatedHealStateResult.ScannedItemsCount += state.ScannedItemsCount
aggregatedHealStateResult.HealDisks = append(aggregatedHealStateResult.HealDisks, state.HealDisks...)
if !state.LastHealActivity.IsZero() && aggregatedHealStateResult.LastHealActivity.Before(state.LastHealActivity) {
aggregatedHealStateResult.LastHealActivity = state.LastHealActivity
// The node which has the last heal activity means its
// is the node that is orchestrating self healing operations,
// which also means it is the same node which decides when
// the next self healing operation will be done.
aggregatedHealStateResult.NextHealRound = state.NextHealRound
}
}
return aggregatedHealStateResult, nil
return bgHealStates, nil
}
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
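The deleted block above folded peer heal states into one result by summing scanned counts, appending heal disks, and keeping the most recent heal activity (whose node also decides the next heal round). The new `bgHealStates.Merge(...)` call is assumed to encapsulate the same logic inside `madmin`. A self-contained sketch of that aggregation, with the madmin type stubbed as a local struct whose field names mirror the diff:

```go
package main

import (
	"fmt"
	"time"
)

type bgHealState struct {
	ScannedItemsCount int64
	LastHealActivity  time.Time
	NextHealRound     time.Time
	HealDisks         []string
}

// merge folds peer states into the local one the way the removed loop did:
// counts are summed, disk lists appended, and the most recent heal activity
// wins together with its NextHealRound (that node orchestrates self-healing).
func (s *bgHealState) merge(others ...bgHealState) {
	for _, o := range others {
		s.ScannedItemsCount += o.ScannedItemsCount
		s.HealDisks = append(s.HealDisks, o.HealDisks...)
		if !o.LastHealActivity.IsZero() && s.LastHealActivity.Before(o.LastHealActivity) {
			s.LastHealActivity = o.LastHealActivity
			s.NextHealRound = o.NextHealRound
		}
	}
}

func main() {
	local := bgHealState{ScannedItemsCount: 10, LastHealActivity: time.Now().Add(-time.Hour)}
	peer := bgHealState{ScannedItemsCount: 5, LastHealActivity: time.Now(), HealDisks: []string{"http://node2/disk1"}}
	local.merge(peer)
	fmt.Println(local.ScannedItemsCount, len(local.HealDisks)) // 15 1
}
```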
@@ -929,7 +907,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
return
}
aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context())
aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@@ -1347,8 +1325,10 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
defer cancel()
var err error
nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
if err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
ctx, err = nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
if err != nil { // returns a locked lock
errResp(err)
return
}
@@ -1491,30 +1471,33 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
return
}
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
setEventStreamHeaders(w)
reportCh := make(chan bandwidth.Report, 1)
reportCh := make(chan bandwidth.Report)
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
bucketsRequestedString := r.URL.Query().Get("buckets")
bucketsRequested := strings.Split(bucketsRequestedString, ",")
go func() {
defer close(reportCh)
for {
reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...)
select {
case <-ctx.Done():
return
default:
time.Sleep(2 * time.Second)
case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
}
}
}()
for {
select {
case report := <-reportCh:
enc := json.NewEncoder(w)
err := enc.Encode(report)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
case report, ok := <-reportCh:
if !ok {
return
}
if err := json.NewEncoder(w).Encode(report); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
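The bandwidth-monitor change above switches to the producer-owned channel pattern: the goroutine that writes reports also closes the channel, selects on `ctx.Done()` so it can never block forever on a send, and the consumer stops when the receive reports `!ok`. This is what closes the leak mentioned in the commit message. A minimal sketch of the shape (names here are illustrative, not the handler's real types):

```go
package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

type report struct{ Buckets int }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	reportCh := make(chan report) // unbuffered: a send only succeeds if a reader is waiting

	go func() {
		defer close(reportCh) // closing tells the consumer to stop; nothing leaks
		for {
			select {
			case <-ctx.Done():
				return
			case reportCh <- report{Buckets: 1}:
				// jitter between reports, as in the handler
				time.Sleep(time.Duration(rnd.Float64() * float64(500*time.Millisecond)))
			}
		}
	}()

	for r := range reportCh { // range ends once the producer closes the channel
		fmt.Println("got report", r.Buckets)
	}
}
```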
@@ -1550,13 +1533,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
if err != nil {
ldap.Status = "offline"
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
ldap.Status = "Not Configured"
} else {
// Close ldap connection to avoid leaks.
ldapConn.Close()
ldap.Status = "online"
ldap.Status = string(madmin.ItemOnline)
}
}
@@ -1565,12 +1548,14 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
// Get the notification target info
notifyTarget := fetchLambdaInfo()
server := getLocalServerProperty(globalEndpoints, r)
local := getLocalServerProperty(globalEndpoints, r)
servers := globalNotificationSys.ServerInfo()
servers = append(servers, server)
servers = append(servers, local)
assignPoolNumbers(servers)
var backend interface{}
mode := madmin.ObjectLayerInitializing
mode := madmin.ItemInitializing
buckets := madmin.Buckets{}
objects := madmin.Objects{}
@@ -1578,18 +1563,23 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ObjectLayerOnline
mode = madmin.ItemOnline
// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
} else {
buckets = madmin.Buckets{Error: err.Error()}
objects = madmin.Objects{Error: err.Error()}
usage = madmin.Usage{Error: err.Error()}
}
// Fetching the backend information
backendInfo := objectAPI.BackendInfo()
if backendInfo.Type == BackendType(madmin.Erasure) {
if backendInfo.Type == madmin.Erasure {
// Calculate the number of online/offline disks of all nodes
var allDisks []madmin.Disk
for _, s := range servers {
@@ -1621,7 +1611,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
}
infoMsg := madmin.InfoMessage{
Mode: mode,
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
SQSARN: globalNotificationSys.GetARNList(false),
@@ -1646,6 +1636,22 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
writeSuccessResponseJSON(w, jsonBytes)
}
func assignPoolNumbers(servers []madmin.ServerProperties) {
for i := range servers {
for idx, ge := range globalEndpoints {
for _, endpoint := range ge.Endpoints {
if servers[i].Endpoint == endpoint.Host {
servers[i].PoolNumber = idx + 1
} else if host, err := xnet.ParseHost(servers[i].Endpoint); err == nil {
if host.Name == endpoint.Hostname() {
servers[i].PoolNumber = idx + 1
}
}
}
}
}
}
func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
lambdaMap := make(map[string][]madmin.TargetIDStatus)
@@ -1655,9 +1661,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
} else {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
}
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
@@ -1669,9 +1675,9 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
active, _ := tgt.IsActive()
targetID := tgt.ID()
if active {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Online"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
} else {
targetIDStatus[targetID.ID] = madmin.Status{Status: "Offline"}
targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
}
list := lambdaMap[targetID.Name]
list = append(list, targetIDStatus)
@@ -1704,9 +1710,9 @@ func fetchKMSStatus() madmin.KMS {
}
if err := checkConnection(kmsInfo.Endpoints[0], 15*time.Second); err != nil {
kmsStat.Status = "offline"
kmsStat.Status = string(madmin.ItemOffline)
} else {
kmsStat.Status = "online"
kmsStat.Status = string(madmin.ItemOnline)
kmsContext := crypto.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
@@ -1714,7 +1720,7 @@ func fetchKMSStatus() madmin.KMS {
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "Ok"
kmsStat.Encrypt = "success"
}
// 2. Verify that we can indeed decrypt the (encrypted) key
@@ -1725,7 +1731,7 @@ func fetchKMSStatus() madmin.KMS {
case subtle.ConstantTimeCompare(key[:], decryptedKey[:]) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "Ok"
kmsStat.Decrypt = "success"
}
}
return kmsStat
@@ -1741,11 +1747,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapLog := make(map[string]madmin.Status)
mapLog[tgt] = madmin.Status{Status: "Online"}
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
loggerInfo = append(loggerInfo, mapLog)
} else {
mapLog := make(map[string]madmin.Status)
mapLog[tgt] = madmin.Status{Status: "offline"}
mapLog[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
loggerInfo = append(loggerInfo, mapLog)
}
}
@@ -1757,11 +1763,11 @@ func fetchLoggerInfo() ([]madmin.Logger, []madmin.Audit) {
err := checkConnection(target.Endpoint(), 15*time.Second)
if err == nil {
mapAudit := make(map[string]madmin.Status)
mapAudit[tgt] = madmin.Status{Status: "Online"}
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOnline)}
auditloggerInfo = append(auditloggerInfo, mapAudit)
} else {
mapAudit := make(map[string]madmin.Status)
mapAudit[tgt] = madmin.Status{Status: "Offline"}
mapAudit[tgt] = madmin.Status{Status: string(madmin.ItemOffline)}
auditloggerInfo = append(auditloggerInfo, mapAudit)
}
}

View File

@@ -22,7 +22,6 @@ import (
"fmt"
"net/http"
"sort"
"strings"
"sync"
"time"
@@ -90,8 +89,9 @@ type allHealState struct {
sync.RWMutex
// map of heal path to heal sequence
healSeqMap map[string]*healSequence
healSeqMap map[string]*healSequence // Indexed by endpoint
healLocalDisks map[Endpoint]struct{}
healStatus map[string]healingTracker // Indexed by disk ID
}
// newHealState - initialize global heal state management
@@ -99,6 +99,7 @@ func newHealState(cleanup bool) *allHealState {
hstate := &allHealState{
healSeqMap: make(map[string]*healSequence),
healLocalDisks: map[Endpoint]struct{}{},
healStatus: make(map[string]healingTracker),
}
if cleanup {
go hstate.periodicHealSeqsClean(GlobalContext)
@@ -113,7 +114,56 @@ func (ahs *allHealState) healDriveCount() int {
return len(ahs.healLocalDisks)
}
func (ahs *allHealState) getHealLocalDisks() Endpoints {
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
for id, disk := range ahs.healStatus {
for _, ep := range healLocalDisks {
if disk.Endpoint == ep.String() {
delete(ahs.healStatus, id)
}
}
}
}
// updateHealStatus will update the heal status.
func (ahs *allHealState) updateHealStatus(tracker *healingTracker) {
ahs.Lock()
defer ahs.Unlock()
ahs.healStatus[tracker.ID] = *tracker
}
// Sort by zone, set and disk index
func sortDisks(disks []madmin.Disk) {
sort.Slice(disks, func(i, j int) bool {
a, b := &disks[i], &disks[j]
if a.PoolIndex != b.PoolIndex {
return a.PoolIndex < b.PoolIndex
}
if a.SetIndex != b.SetIndex {
return a.SetIndex < b.SetIndex
}
return a.DiskIndex < b.DiskIndex
})
}
// getLocalHealingDisks returns local healing disks indexed by endpoint.
func (ahs *allHealState) getLocalHealingDisks() map[string]madmin.HealingDisk {
ahs.RLock()
defer ahs.RUnlock()
dst := make(map[string]madmin.HealingDisk, len(ahs.healStatus))
for _, v := range ahs.healStatus {
dst[v.Endpoint] = v.toHealingDisk()
}
return dst
}
func (ahs *allHealState) getHealLocalDiskEndpoints() Endpoints {
ahs.RLock()
defer ahs.RUnlock()
@@ -124,15 +174,6 @@ func (ahs *allHealState) getHealLocalDisks() Endpoints {
return endpoints
}
func (ahs *allHealState) popHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
for _, ep := range healLocalDisks {
delete(ahs.healLocalDisks, ep)
}
}
func (ahs *allHealState) pushHealLocalDisks(healLocalDisks ...Endpoint) {
ahs.Lock()
defer ahs.Unlock()
@@ -662,6 +703,13 @@ func (h *healSequence) healSequenceStart(objAPI ObjectLayer) {
}
}
func (h *healSequence) logHeal(healType madmin.HealItemType) {
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
}
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
globalHealConfigMu.Lock()
opts := globalHealConfig
@@ -834,11 +882,6 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f
return errHealStopSignalled
}
// Skip metacache entries healing
if strings.HasPrefix(object, "buckets/.minio.sys/.metacache/") {
return nil
}
err := h.queueHealTask(healSource{
bucket: bucket,
object: object,

View File

@@ -110,10 +110,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceAll(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountinfo").HandlerFunc(httpTraceAll(adminAPI.AccountInfoHandler))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")

View File

@@ -17,9 +17,11 @@
package cmd
import (
"context"
"net/http"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin"
)
@@ -40,65 +42,39 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
network[nodeName] = string(madmin.ItemOnline)
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, time.Second); err == nil {
network[nodeName] = "online"
if err := isServerResolvable(endpoint, 2*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = "offline"
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
}
}
}
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return madmin.ServerProperties{
State: "ok",
props := madmin.ServerProperties{
State: string(madmin.ItemInitializing),
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
Disks: storageInfo.Disks,
}
}
func getLocalDisks(endpointServerPools EndpointServerPools) []madmin.Disk {
var localEndpoints Endpoints
network := make(map[string]string)
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
nodeName = "localhost"
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
if !present {
if err := isServerResolvable(endpoint, time.Second); err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
objLayer := newObjectLayerFn()
if objLayer != nil && !globalIsGateway {
// only need Disks information in server mode.
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks
}
localDisks, _ := initStorageDisksWithErrors(localEndpoints)
defer closeStorageDisks(localDisks)
storageInfo, _ := getStorageInfo(localDisks, localEndpoints.GetAllStrings())
return storageInfo.Disks
return props
}

View File

@@ -133,18 +133,20 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}
if !isSet {
w.Header().Set(k, v)
}
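The variadic `equals` helper that replaces the chained `strings.EqualFold` calls in this hunk is not defined anywhere in the diff. A plausible minimal version (an assumption, shown purely for illustration) is a case-insensitive match against any of the given candidates:

```go
package main

import (
	"fmt"
	"strings"
)

// equals reports whether s matches any candidate, ignoring case.
// Hypothetical stand-in for the helper used in the diff above.
func equals(s string, candidates ...string) bool {
	for _, c := range candidates {
		if strings.EqualFold(s, c) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(equals("content-md5", "Content-MD5", "Content-Length")) // true
	fmt.Println(equals("etag", "Content-MD5", "Content-Length"))        // false
}
```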
@@ -177,21 +179,25 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
if objInfo.ReplicationStatus.String() != "" {
w.Header()[xhttp.AmzBucketReplicationStatus] = []string{objInfo.ReplicationStatus.String()}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
})
if !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
if opts.VersionID == "" {
if ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
Name: objInfo.Name,
UserTags: objInfo.UserTags,
VersionID: objInfo.VersionID,
ModTime: objInfo.ModTime,
IsLatest: objInfo.IsLatest,
DeleteMarker: objInfo.DeleteMarker,
SuccessorModTime: objInfo.SuccessorModTime,
}); !expiryTime.IsZero() {
w.Header()[xhttp.AmzExpiration] = []string{
fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
}
}
}
if objInfo.TransitionStatus == lifecycle.TransitionComplete {

View File

@@ -36,8 +36,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("marker")
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("marker"))
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
@@ -56,8 +56,8 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("key-marker")
prefix = trimLeadingSlash(values.Get("prefix"))
marker = trimLeadingSlash(values.Get("key-marker"))
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
@@ -86,8 +86,8 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
startAfter = values.Get("start-after")
prefix = trimLeadingSlash(values.Get("prefix"))
startAfter = trimLeadingSlash(values.Get("start-after"))
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
@@ -117,8 +117,8 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
maxUploads = maxUploadsList
}
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
prefix = trimLeadingSlash(values.Get("prefix"))
keyMarker = trimLeadingSlash(values.Get("key-marker"))
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
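Every change in this file wraps the prefix and marker query parameters in `trimLeadingSlash`, so `/prefix` and `prefix` list the same keys. The helper itself is not shown in the diff; a plausible minimal version (an assumption, not the repository's actual code) simply strips leading slashes:

```go
package main

import (
	"fmt"
	"strings"
)

// trimLeadingSlash drops any leading "/" characters from a list parameter.
// Hypothetical implementation for illustration only.
func trimLeadingSlash(s string) string {
	return strings.TrimLeft(s, "/")
}

func main() {
	fmt.Println(trimLeadingSlash("/photos/2021")) // "photos/2021"
	fmt.Println(trimLeadingSlash("photos/2021"))  // unchanged
}
```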

View File

@@ -48,6 +48,12 @@ type LocationResponse struct {
Location string `xml:",chardata"`
}
// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
type PolicyStatus struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
IsPublic string
}
// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
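The `PolicyStatus` type added in this hunk backs the new `GetBucketPolicyStatusHandler`, which the router changes further down wire to the `policyStatus` query. A self-contained sketch of the XML it marshals to, with the struct copied from the diff; the `IsPublic` value shown is only illustrative:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// PolicyStatus mirrors the response type added in the diff.
type PolicyStatus struct {
	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
	IsPublic string
}

func main() {
	out, _ := xml.MarshalIndent(PolicyStatus{IsPublic: "TRUE"}, "", "  ")
	fmt.Println(string(out))
	// <PolicyStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
	//   <IsPublic>TRUE</IsPublic>
	// </PolicyStatus>
}
```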
@@ -410,9 +416,11 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
listbuckets := make([]Bucket, 0, len(buckets))
var data = ListBucketsResponse{}
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
owner.ID = globalMinioDefaultOwnerID
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
@@ -429,10 +437,12 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListVersionsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = ObjectVersion{}
if object.Name == "" {
@@ -485,10 +495,12 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
var owner = Owner{}
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = Object{}
if object.Name == "" {
@@ -532,12 +544,11 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner = Owner{}
var data = ListObjectsV2Response{}
if fetchOwner {
owner.ID = globalMinioDefaultOwnerID
var owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: "minio",
}
var data = ListObjectsV2Response{}
for _, object := range objects {
var content = Object{}
@@ -565,7 +576,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentLength) || strings.EqualFold(k, xhttp.AmzMetaUnencryptedContentMD5) {
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
content.UserMetadata[k] = v
@@ -639,8 +650,16 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
// Dumb values not meaningful
listPartsResponse.Initiator = Initiator{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.Owner = Owner{
ID: globalMinioDefaultOwnerID,
DisplayName: globalMinioDefaultOwnerID,
}
listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker

View File

@@ -185,6 +185,10 @@ func registerAPIRouter(router *mux.Router) {
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
collectAPIStats("deleteobject", maxClients(httpTraceAll(api.DeleteObjectHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
@@ -262,9 +266,9 @@ func registerAPIRouter(router *mux.Router) {
// ListObjectVersions
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectversions", maxClients(httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
// GetBucketPolicyStatus
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
collectAPIStats("getpolicystatus", maxClients(httpTraceAll(api.GetBucketPolicyStatusHandler)))).Queries("policyStatus", "")
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
collectAPIStats("putbucketlifecycle", maxClients(httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
@@ -320,9 +324,9 @@ func registerAPIRouter(router *mux.Router) {
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
collectAPIStats("deletebucket", maxClients(httpTraceAll(api.DeleteBucketHandler))))
// PostRestoreObject
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
collectAPIStats("restoreobject", maxClients(httpTraceAll(api.PostRestoreObjectHandler)))).Queries("restore", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
collectAPIStats("listobjectsv1", maxClients(httpTraceAll(api.ListObjectsV1Handler))))
}
/// Root operation

View File

@@ -36,6 +36,7 @@ import (
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/etag"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
@@ -161,6 +162,7 @@ func checkAdminRequestAuth(ctx context.Context, r *http.Request, action iampolic
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
@@ -185,12 +187,12 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
func getClaimsFromToken(token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
@@ -237,7 +239,7 @@ func getClaimsFromToken(r *http.Request, token string) (map[string]interface{},
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(r.Context(), err, logger.Application)
logger.LogIf(GlobalContext, err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
@@ -258,7 +260,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r, token)
claims, err := getClaimsFromToken(token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
@@ -271,7 +273,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
_, _, s3Err = checkRequestAuthTypeCredential(ctx, r, action, bucketName, objectName)
return s3Err
}
@@ -281,14 +283,13 @@ func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Ac
// for authenticated requests validates IAM policies.
// returns APIErrorCode if any to be replied to the client.
// Additionally returns the accessKey used in the request, and if this request is by an admin.
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
var cred auth.Credentials
func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err APIErrorCode) {
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrSignatureVersionNotSupported
return cred, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
@@ -298,18 +299,18 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return accessKey, owner, s3Err
return cred, owner, s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
@@ -319,7 +320,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return accessKey, owner, ErrMalformedXML
return cred, owner, ErrMalformedXML
}
// Populate payload to extract location constraint.
@@ -328,7 +329,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return accessKey, owner, s3Error
return cred, owner, s3Error
}
// Populate payload again to handle it in HTTP handler.
@@ -349,7 +350,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
@@ -364,15 +365,16 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
ObjectName: objectName,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}
return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -381,7 +383,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
if action == policy.ListBucketVersionsAction {
@@ -389,6 +391,7 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
// verify as a fallback.
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
@@ -397,11 +400,11 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
Claims: claims,
}) {
// Request is allowed return the appropriate access key.
return cred.AccessKey, owner, ErrNone
return cred, owner, ErrNone
}
}
return cred.AccessKey, owner, ErrAccessDenied
return cred, owner, ErrAccessDenied
}
// Verify if request has valid AWS Signature Version '2'.
@@ -430,19 +433,14 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
return errCode
}
var (
err error
contentMD5, contentSHA256 []byte
)
// Extract 'Content-Md5' if present.
contentMD5, err = checkValidMD5(r.Header)
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
var contentSHA256 []byte
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
@@ -459,8 +457,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicit during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1)
if err != nil {
return toAPIErrorCode(ctx, err)
}
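The switch from `checkValidMD5` to `etag.FromContentMD5` above changes where the client-supplied checksum is parsed, but conceptually the same conversion still happens: the base64-encoded `Content-MD5` header becomes the hex string handed to `hash.NewReader` as the expected ETag. A hedged sketch of that conversion (an illustration, not the `pkg/etag` implementation):

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
)

// etagFromContentMD5 decodes a Content-MD5 header (base64 of the raw MD5 sum)
// and returns it in the hex form used as an ETag. Empty header yields an empty ETag.
// Illustrative stand-in, not the pkg/etag API.
func etagFromContentMD5(h http.Header) (string, error) {
	v := h.Get("Content-Md5")
	if v == "" {
		return "", nil
	}
	sum, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return "", fmt.Errorf("invalid Content-Md5: %w", err)
	}
	return hex.EncodeToString(sum), nil
}

func main() {
	h := http.Header{}
	h.Set("Content-Md5", "XrY7u+Ae7tCTyyK7j1rNww==") // base64 of MD5("hello world")
	et, err := etagFromContentMD5(h)
	fmt.Println(et, err) // 5eb63bbbe01eeed093cb22bb8f5acdc3 <nil>
}
```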
@@ -498,6 +495,7 @@ func setAuthHandler(h http.Handler) http.Handler {
// Validate Authorization header if its valid for JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(authErr.Error()))
return
}
h.ServeHTTP(w, r)
@@ -553,6 +551,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -562,6 +561,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -585,6 +585,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
@@ -595,6 +596,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
@@ -650,6 +652,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
@@ -663,6 +666,7 @@ func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectN
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),

View File

@@ -17,16 +17,23 @@
package cmd
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/madmin"
)
const (
@@ -35,10 +42,200 @@ const (
)
//go:generate msgp -file $GOFILE -unexported
type healingTracker struct {
ID string
// future add more tracking capabilities
// healingTracker is used to persist healing information during a heal.
type healingTracker struct {
disk StorageAPI `msg:"-"`
ID string
PoolIndex int
SetIndex int
DiskIndex int
Path string
Endpoint string
Started time.Time
LastUpdate time.Time
ObjectsHealed uint64
ObjectsFailed uint64
BytesDone uint64
BytesFailed uint64
// Last object scanned.
Bucket string `json:"-"`
Object string `json:"-"`
// Numbers when current bucket started healing,
// for resuming with correct numbers.
ResumeObjectsHealed uint64 `json:"-"`
ResumeObjectsFailed uint64 `json:"-"`
ResumeBytesDone uint64 `json:"-"`
ResumeBytesFailed uint64 `json:"-"`
// Filled on startup/restarts.
QueuedBuckets []string
// Filled during heal.
HealedBuckets []string
// Add future tracking capabilities
// Be sure that they are included in toHealingDisk
}
// loadHealingTracker will load the healing tracker from the supplied disk.
// The disk ID will be validated against the loaded one.
func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
if disk == nil {
return nil, errors.New("loadHealingTracker: nil disk given")
}
diskID, err := disk.GetDiskID()
if err != nil {
return nil, err
}
b, err := disk.ReadAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename))
if err != nil {
return nil, err
}
var h healingTracker
_, err = h.UnmarshalMsg(b)
if err != nil {
return nil, err
}
if h.ID != diskID && h.ID != "" {
return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
}
h.disk = disk
h.ID = diskID
return &h, nil
}
// newHealingTracker will create a new healing tracker for the disk.
func newHealingTracker(disk StorageAPI) *healingTracker {
diskID, _ := disk.GetDiskID()
h := healingTracker{
disk: disk,
ID: diskID,
Path: disk.String(),
Endpoint: disk.Endpoint().String(),
Started: time.Now().UTC(),
}
h.PoolIndex, h.SetIndex, h.DiskIndex = disk.GetDiskLoc()
return &h
}
// update will update the tracker on the disk.
// If the tracker has been deleted an error is returned.
func (h *healingTracker) update(ctx context.Context) error {
if h.disk.Healing() == nil {
return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
}
if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
h.ID, _ = h.disk.GetDiskID()
h.PoolIndex, h.SetIndex, h.DiskIndex = h.disk.GetDiskLoc()
}
return h.save(ctx)
}
// save will unconditionally save the tracker and will be created if not existing.
func (h *healingTracker) save(ctx context.Context) error {
if h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
// Attempt to get location.
if api := newObjectLayerFn(); api != nil {
if ep, ok := api.(*erasureServerPools); ok {
h.PoolIndex, h.SetIndex, h.DiskIndex, _ = ep.getPoolAndSet(h.ID)
}
}
}
h.LastUpdate = time.Now().UTC()
htrackerBytes, err := h.MarshalMsg(nil)
if err != nil {
return err
}
globalBackgroundHealState.updateHealStatus(h)
return h.disk.WriteAll(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
htrackerBytes)
}
// delete the tracker on disk.
func (h *healingTracker) delete(ctx context.Context) error {
return h.disk.Delete(ctx, minioMetaBucket,
pathJoin(bucketMetaPrefix, slashSeparator, healingTrackerFilename),
false)
}
func (h *healingTracker) isHealed(bucket string) bool {
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}
// resume will reset progress to the numbers at the start of the bucket.
func (h *healingTracker) resume() {
h.ObjectsHealed = h.ResumeObjectsHealed
h.ObjectsFailed = h.ResumeObjectsFailed
h.BytesDone = h.ResumeBytesDone
h.BytesFailed = h.ResumeBytesFailed
}
// bucketDone should be called when a bucket is done healing.
// Adds the bucket to the list of healed buckets and updates resume numbers.
func (h *healingTracker) bucketDone(bucket string) {
h.ResumeObjectsHealed = h.ObjectsHealed
h.ResumeObjectsFailed = h.ObjectsFailed
h.ResumeBytesDone = h.BytesDone
h.ResumeBytesFailed = h.BytesFailed
h.HealedBuckets = append(h.HealedBuckets, bucket)
for i, b := range h.QueuedBuckets {
if b == bucket {
// Delete...
h.QueuedBuckets = append(h.QueuedBuckets[:i], h.QueuedBuckets[i+1:]...)
}
}
}
// setQueuedBuckets will add buckets, but exclude any that is already in h.HealedBuckets.
// Order is preserved.
func (h *healingTracker) setQueuedBuckets(buckets []BucketInfo) {
s := set.CreateStringSet(h.HealedBuckets...)
h.QueuedBuckets = make([]string, 0, len(buckets))
for _, b := range buckets {
if !s.Contains(b.Name) {
h.QueuedBuckets = append(h.QueuedBuckets, b.Name)
}
}
}
func (h *healingTracker) printTo(writer io.Writer) {
b, err := json.MarshalIndent(h, "", " ")
if err != nil {
writer.Write([]byte(err.Error()))
}
writer.Write(b)
}
// toHealingDisk converts the information to madmin.HealingDisk
func (h *healingTracker) toHealingDisk() madmin.HealingDisk {
return madmin.HealingDisk{
ID: h.ID,
Endpoint: h.Endpoint,
PoolIndex: h.PoolIndex,
SetIndex: h.SetIndex,
DiskIndex: h.DiskIndex,
Path: h.Path,
Started: h.Started.UTC(),
LastUpdate: h.LastUpdate.UTC(),
ObjectsHealed: h.ObjectsHealed,
ObjectsFailed: h.ObjectsFailed,
BytesDone: h.BytesDone,
BytesFailed: h.BytesFailed,
Bucket: h.Bucket,
Object: h.Object,
QueuedBuckets: h.QueuedBuckets,
HealedBuckets: h.HealedBuckets,
}
}
func initAutoHeal(ctx context.Context, objAPI ObjectLayer) {
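The tracker added above keeps two sets of counters: the live ones and the `Resume*` snapshot taken each time a bucket finishes, so an interrupted heal can roll back to the last completed bucket on restart. A reduced sketch of that bookkeeping, using a trimmed-down local type rather than the real `healingTracker`:

```go
package main

import "fmt"

// tracker models only the resume bookkeeping of the healingTracker in the diff.
type tracker struct {
	ObjectsHealed       uint64
	ResumeObjectsHealed uint64
	QueuedBuckets       []string
	HealedBuckets       []string
}

// bucketDone snapshots progress and moves the bucket from queued to healed.
func (t *tracker) bucketDone(bucket string) {
	t.ResumeObjectsHealed = t.ObjectsHealed
	t.HealedBuckets = append(t.HealedBuckets, bucket)
	for i, b := range t.QueuedBuckets {
		if b == bucket {
			t.QueuedBuckets = append(t.QueuedBuckets[:i], t.QueuedBuckets[i+1:]...)
		}
	}
}

// resume discards progress made after the last completed bucket.
func (t *tracker) resume() { t.ObjectsHealed = t.ResumeObjectsHealed }

func main() {
	t := tracker{QueuedBuckets: []string{"photos", "logs"}}
	t.ObjectsHealed = 100
	t.bucketDone("photos") // snapshot at 100
	t.ObjectsHealed = 140  // partial progress in "logs"
	t.resume()             // e.g. after a restart: roll back to 100
	fmt.Println(t.ObjectsHealed, t.QueuedBuckets, t.HealedBuckets) // 100 [logs] [photos]
}
```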
@@ -90,7 +287,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
disk, _, err := connectEndpoint(endpoint)
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, endpoint)
} else if err == nil && disk != nil && disk.Healing() {
} else if err == nil && disk != nil && disk.Healing() != nil {
disksToHeal = append(disksToHeal, disk.Endpoint())
}
}
@@ -114,7 +311,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
// Perform automatic disk healing when a disk is replaced locally.
diskCheckTimer := time.NewTimer(defaultMonitorNewDiskInterval)
defer diskCheckTimer.Stop()
wait:
for {
select {
case <-ctx.Done():
@@ -125,7 +322,7 @@ wait:
var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
healDisks := globalBackgroundHealState.getHealLocalDisks()
healDisks := globalBackgroundHealState.getHealLocalDiskEndpoints()
if len(healDisks) > 0 {
// Reformat disks
bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
@@ -174,55 +371,76 @@ wait:
buckets, _ := z.ListBuckets(ctx)
buckets = append(buckets, BucketInfo{
Name: pathJoin(minioMetaBucket, minioConfigPrefix),
})
// Buckets data are dispersed in multiple zones/sets, make
// sure to heal all bucket metadata configuration.
buckets = append(buckets, []BucketInfo{
{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
}...)
// Heal latest buckets first.
sort.Slice(buckets, func(i, j int) bool {
a, b := strings.HasPrefix(buckets[i].Name, minioMetaBucket), strings.HasPrefix(buckets[j].Name, minioMetaBucket)
if a != b {
return a
}
return buckets[i].Created.After(buckets[j].Created)
})
// TODO(klauspost): This will block until all heals are done,
// in the future this should be able to start healing other sets at once.
var wg sync.WaitGroup
for i, setMap := range erasureSetInPoolDisksToHeal {
i := i
for setIndex, disks := range setMap {
for _, disk := range disks {
logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
if len(disks) == 0 {
continue
}
wg.Add(1)
go func(setIndex int, disks []StorageAPI) {
defer wg.Done()
for _, disk := range disks {
logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(i+1))
// So someone changed the drives underneath, healing tracker missing.
if !disk.Healing() {
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
diskID, err := disk.GetDiskID()
// So someone changed the drives underneath, healing tracker missing.
tracker, err := loadHealingTracker(ctx, disk)
if err != nil {
logger.LogIf(ctx, err)
// reading format.json failed or not found, proceed to look
// for new disks to be healed again, we cannot proceed further.
goto wait
logger.Info("Healing tracker missing on '%s', disk was swapped again on %s pool", disk, humanize.Ordinal(i+1))
tracker = newHealingTracker(disk)
}
if err := saveHealingTracker(disk, diskID); err != nil {
tracker.PoolIndex, tracker.SetIndex, tracker.DiskIndex = disk.GetDiskLoc()
tracker.setQueuedBuckets(buckets)
if err := tracker.save(ctx); err != nil {
logger.LogIf(ctx, err)
// Unable to write healing tracker, permission denied or some
// other unexpected error occurred. Proceed to look for new
// disks to be healed again, we cannot proceed further.
goto wait
return
}
err = z.serverPools[i].sets[setIndex].healErasureSet(ctx, buckets, tracker)
if err != nil {
logger.LogIf(ctx, err)
continue
}
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
var buf bytes.Buffer
tracker.printTo(&buf)
logger.Info("Summary:\n%s", buf.String())
logger.LogIf(ctx, tracker.delete(ctx))
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
lbDisks := z.serverPools[i].sets[setIndex].getOnlineDisks()
if err := healErasureSet(ctx, setIndex, buckets, lbDisks); err != nil {
logger.LogIf(ctx, err)
continue
}
logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
if err := disk.Delete(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix),
healingTrackerFilename, false); err != nil && !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
continue
}
// Only upon success pop the healed disk.
globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
}
}(setIndex, disks)
}
}
wg.Wait()
}
}
}
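The rewritten loop above heals each erasure set's replaced disks in its own goroutine and waits on a `sync.WaitGroup`, instead of the previous sequential `goto wait` flow. A stripped-down sketch of that fan-out shape (set indices and the heal call are stand-ins, not the real `healErasureSet`):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// disksToHeal maps a set index to the replaced disks found in that set.
	disksToHeal := map[int][]string{0: {"/disk3"}, 2: {"/disk1", "/disk5"}}

	var wg sync.WaitGroup
	for setIndex, disks := range disksToHeal {
		if len(disks) == 0 {
			continue
		}
		wg.Add(1)
		go func(setIndex int, disks []string) {
			defer wg.Done()
			for _, d := range disks {
				// stand-in for healErasureSet plus tracker bookkeeping
				fmt.Printf("healing %s in set %d\n", d, setIndex)
			}
		}(setIndex, disks)
	}
	wg.Wait() // as in the diff, this still blocks until every set is done
}
```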

View File

@@ -30,6 +30,146 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
err = dc.Skip()
if err != nil {
@@ -42,10 +182,10 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
}
// EncodeMsg implements msgp.Encodable
func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 20
// write "ID"
err = en.Append(0x81, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@@ -54,16 +194,283 @@ func (z healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ID")
return
}
// write "PoolIndex"
err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.PoolIndex)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
// write "SetIndex"
err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.SetIndex)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
// write "DiskIndex"
err = en.Append(0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
if err != nil {
return
}
err = en.WriteInt(z.DiskIndex)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
// write "Path"
err = en.Append(0xa4, 0x50, 0x61, 0x74, 0x68)
if err != nil {
return
}
err = en.WriteString(z.Path)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
// write "Endpoint"
err = en.Append(0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Endpoint)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
// write "Started"
err = en.Append(0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Started)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
// write "LastUpdate"
err = en.Append(0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteTime(z.LastUpdate)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
// write "ObjectsHealed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
// write "ObjectsFailed"
err = en.Append(0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
// write "BytesDone"
err = en.Append(0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.BytesDone)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
// write "BytesFailed"
err = en.Append(0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.BytesFailed)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "Object"
err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Object)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
// write "ResumeObjectsHealed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsHealed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
// write "ResumeObjectsFailed"
err = en.Append(0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeObjectsFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
// write "ResumeBytesDone"
err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesDone)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
// write "ResumeBytesFailed"
err = en.Append(0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteUint64(z.ResumeBytesFailed)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
// write "QueuedBuckets"
err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.QueuedBuckets)))
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
for za0001 := range z.QueuedBuckets {
err = en.WriteString(z.QueuedBuckets[za0001])
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
// write "HealedBuckets"
err = en.Append(0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.HealedBuckets)))
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
for za0002 := range z.HealedBuckets {
err = en.WriteString(z.HealedBuckets[za0002])
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// map header, size 20
// string "ID"
o = append(o, 0x81, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x14, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.PoolIndex)
// string "SetIndex"
o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.SetIndex)
// string "DiskIndex"
o = append(o, 0xa9, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendInt(o, z.DiskIndex)
// string "Path"
o = append(o, 0xa4, 0x50, 0x61, 0x74, 0x68)
o = msgp.AppendString(o, z.Path)
// string "Endpoint"
o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74)
o = msgp.AppendString(o, z.Endpoint)
// string "Started"
o = append(o, 0xa7, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Started)
// string "LastUpdate"
o = append(o, 0xaa, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65)
o = msgp.AppendTime(o, z.LastUpdate)
// string "ObjectsHealed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsHealed)
// string "ObjectsFailed"
o = append(o, 0xad, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ObjectsFailed)
// string "BytesDone"
o = append(o, 0xa9, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.BytesDone)
// string "BytesFailed"
o = append(o, 0xab, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesFailed)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "Object"
o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74)
o = msgp.AppendString(o, z.Object)
// string "ResumeObjectsHealed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsHealed)
// string "ResumeObjectsFailed"
o = append(o, 0xb3, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeObjectsFailed)
// string "ResumeBytesDone"
o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65)
o = msgp.AppendUint64(o, z.ResumeBytesDone)
// string "ResumeBytesFailed"
o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64)
o = msgp.AppendUint64(o, z.ResumeBytesFailed)
// string "QueuedBuckets"
o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets)))
for za0001 := range z.QueuedBuckets {
o = msgp.AppendString(o, z.QueuedBuckets[za0001])
}
// string "HealedBuckets"
o = append(o, 0xad, 0x48, 0x65, 0x61, 0x6c, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73)
o = msgp.AppendArrayHeader(o, uint32(len(z.HealedBuckets)))
for za0002 := range z.HealedBuckets {
o = msgp.AppendString(o, z.HealedBuckets[za0002])
}
return
}
@@ -91,6 +498,146 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ID")
return
}
case "PoolIndex":
z.PoolIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PoolIndex")
return
}
case "SetIndex":
z.SetIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "SetIndex")
return
}
case "DiskIndex":
z.DiskIndex, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "DiskIndex")
return
}
case "Path":
z.Path, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Path")
return
}
case "Endpoint":
z.Endpoint, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Endpoint")
return
}
case "Started":
z.Started, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Started")
return
}
case "LastUpdate":
z.LastUpdate, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LastUpdate")
return
}
case "ObjectsHealed":
z.ObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsHealed")
return
}
case "ObjectsFailed":
z.ObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ObjectsFailed")
return
}
case "BytesDone":
z.BytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesDone")
return
}
case "BytesFailed":
z.BytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "BytesFailed")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "Object":
z.Object, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Object")
return
}
case "ResumeObjectsHealed":
z.ResumeObjectsHealed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsHealed")
return
}
case "ResumeObjectsFailed":
z.ResumeObjectsFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeObjectsFailed")
return
}
case "ResumeBytesDone":
z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesDone")
return
}
case "ResumeBytesFailed":
z.ResumeBytesFailed, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResumeBytesFailed")
return
}
case "QueuedBuckets":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets")
return
}
if cap(z.QueuedBuckets) >= int(zb0002) {
z.QueuedBuckets = (z.QueuedBuckets)[:zb0002]
} else {
z.QueuedBuckets = make([]string, zb0002)
}
for za0001 := range z.QueuedBuckets {
z.QueuedBuckets[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "QueuedBuckets", za0001)
return
}
}
case "HealedBuckets":
var zb0003 uint32
zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets")
return
}
if cap(z.HealedBuckets) >= int(zb0003) {
z.HealedBuckets = (z.HealedBuckets)[:zb0003]
} else {
z.HealedBuckets = make([]string, zb0003)
}
for za0002 := range z.HealedBuckets {
z.HealedBuckets[za0002], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HealedBuckets", za0002)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@@ -104,7 +651,14 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z healingTracker) Msgsize() (s int) {
s = 1 + 3 + msgp.StringPrefixSize + len(z.ID)
func (z *healingTracker) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 14 + msgp.Uint64Size + 14 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 20 + msgp.Uint64Size + 20 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize
for za0001 := range z.QueuedBuckets {
s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001])
}
s += 14 + msgp.ArrayHeaderSize
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
return
}
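
The hand-rolled byte constants in the generated code above are easier to follow next to the library helpers that produce them. A minimal standalone sketch (not part of this changeset, and assuming the generator targets github.com/tinylib/msgp) showing that the old header byte 0x81 is a one-entry fixmap, the new 0xde 0x00 0x14 prefix is a 20-entry map16, and each field is written as its key string followed by its value:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// A map with <= 15 entries fits in a single "fixmap" byte: 0x80 | size.
	fmt.Printf("% x\n", msgp.AppendMapHeader(nil, 1)) // 81

	// 20 entries need the 3-byte "map16" form: 0xde plus a big-endian count.
	fmt.Printf("% x\n", msgp.AppendMapHeader(nil, 20)) // de 00 14

	// Each field is its key (fixstr) followed by its value, which is exactly
	// what the generated Append/AppendString calls spell out byte by byte.
	o := msgp.AppendMapHeader(nil, 1)
	o = msgp.AppendString(o, "ID")
	fmt.Println(bytes.Equal(o, []byte{0x81, 0xa2, 'I', 'D'})) // true
}
```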

View File

@@ -19,7 +19,6 @@ package cmd
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
textData := generateBytesData(objSize)
// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function that writes the data onto the FS/Erasure backend.
// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
return bytes.Repeat(getRandomByte(), size)
}
// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp Erasure/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function that writes the data onto the FS/Erasure backend.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
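
For reference, the parallel benchmark utility removed above boils down to the standard testing.B.RunParallel idiom. A self-contained sketch of that pattern follows; doGet is a hypothetical stand-in for obj.GetObject and nothing here is MinIO API:

```
// Would live in a _test.go file; shown standalone for illustration only.
package bench

import (
	"strconv"
	"testing"
)

// doGet is a hypothetical stand-in for the real object-layer GetObject call.
func doGet(key string) error { return nil }

func BenchmarkGetParallel(b *testing.B) {
	b.ReportAllocs() // report allocations and bytes allocated per op
	b.ResetTimer()   // exclude setup cost from the measurement
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() { // each goroutine pulls iterations until b.N is consumed
			if err := doGet("object" + strconv.Itoa(i%10)); err != nil {
				b.Error(err)
			}
			i++
		}
	})
}
```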

View File

@@ -37,7 +37,7 @@ func (err *errHashMismatch) Error() string {
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
iow io.WriteCloser
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
@@ -71,9 +71,10 @@ func (b *streamingBitrotWriter) Close() error {
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
@@ -81,8 +82,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r)
r.CloseWithError(err)
r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
close(bw.canClose)
}()
return bw
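
The canClose channel deserves a note: the pipe's consumer runs in a goroutine, so Close on the writer must not report success before that goroutine has finished. A stripped-down sketch of the same pattern, stdlib only and independent of the MinIO types:

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

type pipedWriter struct {
	iow      io.WriteCloser
	canClose chan struct{}
}

func (w *pipedWriter) Write(p []byte) (int, error) { return w.iow.Write(p) }

func (w *pipedWriter) Close() error {
	err := w.iow.Close()
	<-w.canClose // wait until the consumer goroutine is done with the reader
	return err
}

func newPipedWriter(consume func(io.Reader) error) io.WriteCloser {
	r, w := io.Pipe()
	pw := &pipedWriter{iow: w, canClose: make(chan struct{})}
	go func() {
		r.CloseWithError(consume(r)) // mirrors disk.CreateFile(..., r) above
		close(pw.canClose)
	}()
	return pw
}

func main() {
	w := newPipedWriter(func(r io.Reader) error {
		_, err := io.Copy(ioutil.Discard, r)
		return err
	})
	fmt.Fprint(w, "hello")
	fmt.Println(w.Close()) // <nil>, and the consumer has definitely finished
}
```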

View File

@@ -17,13 +17,13 @@
package cmd
import (
"crypto/sha256"
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)
@@ -96,9 +96,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return
}
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64, heal bool) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize, heal)
}
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}

View File

@@ -20,7 +20,6 @@ import (
"context"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
@@ -28,7 +27,7 @@ import (
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
@@ -42,39 +41,39 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
disk.MakeVol(context.Background(), volume)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10, false)
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
log.Fatal(err)
t.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, nil, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
log.Fatal(err)
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
log.Fatal(err)
t.Fatal(err)
}
}

View File

@@ -17,14 +17,16 @@
package cmd
import (
"bytes"
"crypto/subtle"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
@@ -125,7 +127,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Add/update buckets that are not registered with the DNS
bucketsToBeUpdatedSlice := bucketsToBeUpdated.ToSlice()
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice))
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
ctx, cancel := g.WithCancelOnError(GlobalContext)
defer cancel()
for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
@@ -133,14 +138,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
}, index)
}
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(GlobalContext, err)
}
if err := g.WaitErr(); err != nil {
logger.LogIf(ctx, err)
return
}
for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(GlobalContext, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket.", bucket, globalDomainIPs.ToSlice()))
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}
var wg sync.WaitGroup
@@ -288,7 +292,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
listBuckets := objectAPI.ListBuckets
accessKey, owner, s3Error := checkRequestAuthTypeToAccessKey(ctx, r, policy.ListAllMyBucketsAction, "", "")
cred, owner, s3Error := checkRequestAuthTypeCredential(ctx, r, policy.ListAllMyBucketsAction, "", "")
if s3Error != ErrNone && s3Error != ErrAccessDenied {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
@@ -334,16 +338,17 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
// err will be nil here as we already called this function
// earlier in this request.
claims, _ := getClaimsFromToken(r, getSessionToken(r))
claims, _ := getClaimsFromToken(getSessionToken(r))
n := 0
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
for _, bucketInfo := range bucketsInfo {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: accessKey,
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: iampolicy.ListBucketAction,
BucketName: bucketInfo.Name,
ConditionValues: getConditionValues(r, "", accessKey, claims),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
@@ -408,6 +413,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
return
}
// Trim any leading `/` from the object names in the delete list.
for i := range deleteObjects.Objects {
deleteObjects.Objects[i].ObjectName = trimLeadingSlash(deleteObjects.Objects[i].ObjectName)
}
// Call checkRequestAuthType to populate ReqInfo.AccessKey before GetBucketInfo()
// Ignore errors here to preserve the S3 error behavior of GetBucketInfo()
checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")
@@ -424,6 +434,12 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects
}
// Return MalformedXML, per the S3 spec, if the list of objects is empty
if len(deleteObjects.Objects) == 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
return
}
var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
@@ -480,7 +496,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
object.PurgeTransitioned = goi.TransitionStatus
}
if replicateDeletes {
delMarker, replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
replicate, repsync := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectName: object.ObjectName,
VersionID: object.VersionID,
}, goi, gerr)
@@ -495,9 +511,6 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
if object.VersionID != "" {
object.VersionPurgeStatus = Pending
if delMarker {
object.DeleteMarkerVersionID = object.VersionID
}
} else {
object.DeleteMarkerReplicationStatus = string(replication.Pending)
}
@@ -541,14 +554,18 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
})
deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
for i := range errs {
dindex := objectsToDelete[ObjectToDelete{
// DeleteMarkerVersionID is deliberately left out of the lookup key to
// avoid lookup errors: it is only created during delete-marker creation,
// when the client did not specify a versionID.
objToDel := ObjectToDelete{
ObjectName: dObjects[i].ObjectName,
VersionID: dObjects[i].VersionID,
DeleteMarkerVersionID: dObjects[i].DeleteMarkerVersionID,
VersionPurgeStatus: dObjects[i].VersionPurgeStatus,
DeleteMarkerReplicationStatus: dObjects[i].DeleteMarkerReplicationStatus,
PurgeTransitioned: dObjects[i].PurgeTransitioned,
}]
}
dindex := objectsToDelete[objToDel]
if errs[i] == nil || isErrObjectNotFound(errs[i]) || isErrVersionNotFound(errs[i]) {
if replicateDeletes {
dObjects[i].DeleteMarkerReplicationStatus = deleteList[i].DeleteMarkerReplicationStatus
@@ -580,6 +597,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
for _, dobj := range deletedObjects {
if dobj.ObjectName == "" {
continue
}
if replicateDeletes {
if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending {
dv := DeletedObjectVersionInfo{
@@ -591,25 +612,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
if hasLifecycleConfig && dobj.PurgeTransitioned == lifecycle.TransitionComplete { // clean up transitioned tier
deleteTransitionedObject(ctx, newObjectLayerFn(), bucket, dobj.ObjectName, lifecycle.ObjectOpts{
deleteTransitionedObject(ctx, objectAPI, bucket, dobj.ObjectName, lifecycle.ObjectOpts{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}, false, true)
}
}
// Notify deleted event for objects.
for _, dobj := range deletedObjects {
eventName := event.ObjectRemovedDelete
objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
DeleteMarker: dobj.DeleteMarker,
}
if dobj.DeleteMarker {
objInfo.DeleteMarker = dobj.DeleteMarker
if objInfo.DeleteMarker {
objInfo.VersionID = dobj.DeleteMarkerVersionID
eventName = event.ObjectRemovedDeleteMarkerCreated
}
@@ -789,13 +806,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
resource, err := getResource(r.URL.Path, r.Host, globalDomainNames)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL, guessIsBrowserReq(r))
return
}
// Make sure that the URL does not contain object name.
if bucket != filepath.Clean(resource[1:]) {
// Make sure that the URL does not contain object name.
if bucket != path.Clean(resource[1:]) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
return
}
@@ -838,13 +857,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
defer fileBody.Close()
formValues.Set("Bucket", bucket)
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
// S3 feature to replace ${filename} found in Key form field
// by the filename attribute passed in multipart
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
object := trimLeadingSlash(formValues.Get("Key"))
successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
@@ -858,12 +876,51 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
// Verify policy signature.
errCode := doesPolicySignatureMatch(formValues)
cred, errCode := doesPolicySignatureMatch(formValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Once the signature is validated, check whether the user has
// explicit permission to perform this upload.
{
token := formValues.Get(xhttp.AmzSecurityToken)
if token != "" && cred.AccessKey == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoAccessKey), r.URL, guessIsBrowserReq(r))
return
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidToken), r.URL, guessIsBrowserReq(r))
return
}
// Extract claims if any.
claims, err := getClaimsFromToken(token)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if !globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
BucketName: bucket,
ObjectName: object,
IsOwner: globalActiveCred.AccessKey == cred.AccessKey,
Claims: claims,
}) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL, guessIsBrowserReq(r))
return
}
}
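
A brief aside on the crypto/subtle check above: comparing secrets with == or bytes.Equal can leak, through response timing, how many leading bytes matched, whereas ConstantTimeCompare inspects every byte of equal-length inputs. A minimal stdlib-only illustration:

```
package main

import (
	"crypto/subtle"
	"fmt"
)

// tokensMatch compares two tokens without an early exit on the first
// mismatching byte (note: a length difference still returns 0 immediately).
func tokensMatch(presented, expected string) bool {
	return subtle.ConstantTimeCompare([]byte(presented), []byte(expected)) == 1
}

func main() {
	fmt.Println(tokensMatch("abc123", "abc123")) // true
	fmt.Println(tokensMatch("abc124", "abc123")) // false
}
```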
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL, guessIsBrowserReq(r))
@@ -872,10 +929,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := parsePostPolicyForm(string(policyBytes))
postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat), r.URL, guessIsBrowserReq(r))
errAPI := errorCodes.ToAPIErr(ErrPostPolicyConditionInvalidFormat)
errAPI.Description = fmt.Sprintf("%s '(%s)'", errAPI.Description, err)
writeErrorResponse(ctx, w, errAPI, r.URL, guessIsBrowserReq(r))
return
}
@@ -903,20 +961,20 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Extract metadata to be saved from received Form.
metadata := make(map[string]string)
err = extractMetadataFromMap(ctx, formValues, metadata)
err = extractMetadataFromMime(ctx, textproto.MIMEHeader(formValues), metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader, nil, nil)
pReader := NewPutObjReader(rawReader)
var objectEncryptionKey crypto.ObjectKey
// Check if bucket encryption is enabled
@@ -956,12 +1014,16 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
pReader = NewPutObjReader(rawReader, hashReader, &objectEncryptionKey)
}
}
@@ -1018,6 +1080,64 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
}
// GetBucketPolicyStatusHandler - Retrieves the policy status
// for an MinIO bucket, indicating whether the bucket is public.
func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicyStatus")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyStatusAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Check if anonymous (non-owner) has access to list objects.
readable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})
// Check if anonymous (non-owner) has access to upload objects.
writable := globalPolicySys.IsAllowed(policy.Args{
Action: policy.PutObjectAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
})
encodedSuccessResponse := encodeResponse(PolicyStatus{
IsPublic: func() string {
// Silly to have special 'boolean' string values, yes,
// but we comply with the upstream implementation
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
if readable && writable {
return "TRUE"
}
return "FALSE"
}(),
})
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// HeadBucketHandler - HEAD Bucket
// ----------
// This operation is useful to determine if a bucket exists.

View File

@@ -65,13 +65,18 @@ func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}
type expiryState struct {
expiryCh chan ObjectInfo
type expiryTask struct {
objInfo ObjectInfo
versionExpiry bool
}
func (es *expiryState) queueExpiryTask(oi ObjectInfo) {
type expiryState struct {
expiryCh chan expiryTask
}
func (es *expiryState) queueExpiryTask(oi ObjectInfo, rmVersion bool) {
select {
case es.expiryCh <- oi:
case es.expiryCh <- expiryTask{objInfo: oi, versionExpiry: rmVersion}:
default:
}
}
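
The select in queueExpiryTask above is the usual non-blocking enqueue: if the buffered channel is full, the task is dropped instead of stalling the caller. A tiny standalone sketch of the same idiom (nothing here is MinIO-specific):

```
package main

import "fmt"

type task struct{ name string }

func main() {
	ch := make(chan task, 2) // small buffer for demonstration

	enqueue := func(t task) bool {
		select {
		case ch <- t:
			return true
		default: // queue full: drop rather than block the caller
			return false
		}
	}

	fmt.Println(enqueue(task{"a"})) // true
	fmt.Println(enqueue(task{"b"})) // true
	fmt.Println(enqueue(task{"c"})) // false, buffer of 2 is full
}
```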
@@ -82,7 +87,7 @@ var (
func newExpiryState() *expiryState {
es := &expiryState{
expiryCh: make(chan ObjectInfo, 10000),
expiryCh: make(chan expiryTask, 10000),
}
go func() {
<-GlobalContext.Done()
@@ -94,8 +99,8 @@ func newExpiryState() *expiryState {
func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState()
go func() {
for oi := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, oi, false)
for t := range globalExpiryState.expiryCh {
applyExpiryRule(ctx, objectAPI, t.objInfo, false, t.versionExpiry)
}
}()
}
@@ -240,16 +245,11 @@ func transitionSCInUse(ctx context.Context, lfc *lifecycle.Lifecycle, bucket, ar
}
// set PutObjectOptions for PUT operation to transition data to target cluster
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: objInfo.StorageClass,
@@ -259,22 +259,40 @@ func putTransitionOpts(objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
SourceETag: objInfo.ETag,
},
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
return
return putOpts, nil
}
// handle deletes of transitioned objects or object versions when one of the following is true:
@@ -375,7 +393,12 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
return nil
}
putOpts := putTransitionOpts(oi)
putOpts, err := putTransitionOpts(oi)
if err != nil {
gr.Close()
return err
}
if _, err = tgt.PutObject(ctx, arn.Bucket, oi.Name, gr, oi.Size, putOpts); err != nil {
gr.Close()
return err
@@ -400,6 +423,7 @@ func transitionObject(ctx context.Context, objectAPI ObjectLayer, objInfo Object
Object: objInfo,
Host: "Internal: [ILM-Transition]",
})
return err
}
@@ -668,11 +692,11 @@ func restoreTransitionedObject(ctx context.Context, bucket, object string, objAP
return err
}
defer gr.Close()
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
if err != nil {
return err
}
pReader := NewPutObjReader(hashReader, nil, nil)
pReader := NewPutObjReader(hashReader)
opts := putRestoreOpts(bucket, object, rreq, objInfo)
opts.UserDefined[xhttp.AmzRestore] = fmt.Sprintf("ongoing-request=%t, expiry-date=%s", false, restoreExpiry.Format(http.TimeFormat))
if _, err := objAPI.PutObject(ctx, bucket, object, pReader, opts); err != nil {

View File

@@ -26,32 +26,25 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/sync/errgroup"
)
func concurrentDecryptETag(ctx context.Context, objects []ObjectInfo) {
inParallel := func(objects []ObjectInfo) {
g := errgroup.WithNErrs(len(objects))
for index := range objects {
index := index
g.Go(func() error {
objects[index].ETag = objects[index].GetActualETag(nil)
objects[index].Size, _ = objects[index].GetActualSize()
return nil
}, index)
}
g.Wait()
}
const maxConcurrent = 500
for {
if len(objects) < maxConcurrent {
inParallel(objects)
return
}
inParallel(objects[:maxConcurrent])
objects = objects[maxConcurrent:]
g := errgroup.WithNErrs(len(objects)).WithConcurrency(500)
_, cancel := g.WithCancelOnError(ctx)
defer cancel()
for index := range objects {
index := index
g.Go(func() error {
size, err := objects[index].GetActualSize()
if err == nil {
objects[index].Size = size
}
objects[index].ETag = objects[index].GetActualETag(nil)
return nil
}, index)
}
g.WaitErr()
}
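
The bounded-concurrency errgroup usage above relies on MinIO's internal pkg/sync/errgroup. The sketch below mirrors only the calls visible in this changeset (WithNErrs, WithConcurrency, WithCancelOnError, Go with an index, WaitErr) and makes no claims about the rest of that package's API:

```
package main

import (
	"context"
	"fmt"

	"github.com/minio/minio/pkg/sync/errgroup"
)

func main() {
	items := []string{"a", "b", "c"}

	// At most 2 of the submitted functions run at any one time.
	g := errgroup.WithNErrs(len(items)).WithConcurrency(2)
	_, cancel := g.WithCancelOnError(context.Background())
	defer cancel()

	for index := range items {
		index := index
		g.Go(func() error {
			fmt.Println("processing", items[index])
			return nil
		}, index)
	}
	if err := g.WaitErr(); err != nil { // first error, if any
		fmt.Println("failed:", err)
	}
}
```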
// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -301,10 +294,6 @@ func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http
return proxyRequest(ctx, w, r, ep)
}
func proxyRequestByStringHash(ctx context.Context, w http.ResponseWriter, r *http.Request, str string) (success bool) {
return proxyRequestByNodeIndex(ctx, w, r, crcHashMod(str, len(globalProxyEndpoints)))
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
@@ -343,15 +332,6 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
// Forward the request using Source IP or bucket
forwardStr := handlers.GetSourceIPFromHeaders(r)
if forwardStr == "" {
forwardStr = bucket
}
if proxyRequestByStringHash(ctx, w, r, forwardStr) {
return
}
listObjects := objectAPI.ListObjects
// Inititate a list objects operation based on the input params.

View File

@@ -83,17 +83,38 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
}
}
authType := getRequestAuthType(r)
var signatureVersion string
switch authType {
case authTypeSignedV2, authTypePresignedV2:
signatureVersion = signV2Algorithm
case authTypeSigned, authTypePresigned, authTypeStreamingSigned, authTypePostPolicy:
signatureVersion = signV4Algorithm
}
var authtype string
switch authType {
case authTypePresignedV2, authTypePresigned:
authtype = "REST-QUERY-STRING"
case authTypeSignedV2, authTypeSigned, authTypeStreamingSigned:
authtype = "REST-HEADER"
case authTypePostPolicy:
authtype = "POST"
}
args := map[string][]string{
"CurrentTime": {currTime.Format(time.RFC3339)},
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
"SourceIp": {handlers.GetSourceIP(r)},
"UserAgent": {r.UserAgent()},
"Referer": {r.Referer()},
"principaltype": {principalType},
"userid": {username},
"username": {username},
"versionid": {vid},
"CurrentTime": {currTime.Format(time.RFC3339)},
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
"SourceIp": {handlers.GetSourceIP(r)},
"UserAgent": {r.UserAgent()},
"Referer": {r.Referer()},
"principaltype": {principalType},
"userid": {username},
"username": {username},
"versionid": {vid},
"signatureversion": {signatureVersion},
"authType": {authtype},
}
if lc != "" {

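The two keys added to the condition-values map above ("signatureversion" and "authType") correspond to the s3:signatureversion and s3:authType condition keys documented for bucket policies; the exact policy-side spelling is an assumption here, not something shown in this diff. A hypothetical policy, embedded in Go only to keep the examples in one language, that would deny uploads not signed with SigV4:

```
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical policy: deny any PutObject that was not signed with SigV4
// ("AWS4-HMAC-SHA256" matches signV4Algorithm in the handler code above).
const denySigV2 = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Deny",
    "Principal": "*",
    "Action": "s3:PutObject",
    "Resource": "arn:aws:s3:::mybucket/*",
    "Condition": {
      "StringNotEquals": {"s3:signatureversion": "AWS4-HMAC-SHA256"}
    }
  }]
}`

func main() {
	// Just validate the JSON shape; evaluation is left to the policy engine.
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(denySigV2), &doc); err != nil {
		panic(err)
	}
	fmt.Println("statements:", len(doc["Statement"].([]interface{})))
}
```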
View File

@@ -20,8 +20,9 @@ import (
"context"
"fmt"
"net/http"
"runtime"
"reflect"
"strings"
"sync"
"time"
minio "github.com/minio/minio-go/v7"
@@ -169,22 +170,25 @@ func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToD
}
// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
for _, header := range standardHeaders {
if strings.ToLower(header) == key {
return true
}
}
return false
func isStandardHeader(matchHeaderKey string) bool {
return equals(matchHeaderKey, standardHeaders...)
}
// returns whether object version is a deletemarker and if object qualifies for replication
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (dm, replicate, sync bool) {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, gerr error) (replicate, sync bool) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
return false, false, sync
return false, sync
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: oi.DeleteMarker,
VersionID: dobj.VersionID,
OpType: replication.DeleteReplicationType,
}
replicate = rcfg.Replicate(opts)
// when incoming delete is removal of a delete marker( a.k.a versioned delete),
// GetObjectInfo returns extra information even though it returns errFileNotFound
if gerr != nil {
@@ -193,25 +197,20 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
case replication.Pending, replication.Completed, replication.Failed:
validReplStatus = true
}
if oi.DeleteMarker && validReplStatus {
return oi.DeleteMarker, true, sync
if oi.DeleteMarker && (validReplStatus || replicate) {
return true, sync
}
return oi.DeleteMarker, false, sync
// It can be the case that the other cluster is down and a duplicate `mc rm --vid`
// is issued - this still needs to be replicated back to the other target
return oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed, sync
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
// the target online status should not be used here while deciding
// whether to replicate deletes as the target could be temporarily down
if tgt == nil {
return oi.DeleteMarker, false, false
return false, false
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined),
UserTags: oi.UserTags,
DeleteMarker: oi.DeleteMarker,
VersionID: dobj.VersionID,
}
return oi.DeleteMarker, rcfg.Replicate(opts), tgt.replicateSync
return replicate, tgt.replicateSync
}
// replicate deletes to the designated replication target if replication configuration
@@ -226,26 +225,52 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
// on target.
func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectAPI ObjectLayer) {
bucket := dobj.Bucket
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
logger.LogIf(ctx, err)
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
return
}
versionID := dobj.DeleteMarkerVersionID
if versionID == "" {
versionID = dobj.VersionID
}
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, rcfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, rcfg.RoleArn))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: event.ObjectReplicationNotTracked,
})
return
}
rmErr := tgt.RemoveObject(ctx, rcfg.GetDestination().Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
@@ -258,6 +283,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
} else {
versionPurgeStatus = Failed
}
logger.LogIf(ctx, fmt.Errorf("Unable to replicate delete marker to %s/%s(%s): %s", rcfg.GetDestination().Bucket, dobj.ObjectName, versionID, rmErr))
} else {
if dobj.VersionID == "" {
replicationStatus = string(replication.Completed)
@@ -272,66 +298,102 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectVersionInfo, objectA
}
// Update metadata on the delete marker, or purge the permanent delete, if replication succeeded.
objInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
dobjInfo, err := objectAPI.DeleteObject(ctx, bucket, dobj.ObjectName, ObjectOptions{
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
DeleteMarkerReplicationStatus: replicationStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionPurgeStatus: versionPurgeStatus,
Versioned: globalBucketVersioningSys.Enabled(bucket),
VersionSuspended: globalBucketVersioningSys.Suspended(bucket),
})
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s %s: %w", bucket, dobj.ObjectName, dobj.VersionID, err))
if err != nil && !isErrVersionNotFound(err) { // VersionNotFound would be reported by pool that object version is missing on.
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %s", bucket, dobj.ObjectName, versionID, err))
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
Bucket: bucket,
Name: dobj.ObjectName,
VersionID: versionID,
DeleteMarker: dobj.DeleteMarker,
},
Host: "Internal: [Replication]",
EventName: eventName,
})
} else {
sendEvent(eventArgs{
BucketName: bucket,
Object: dobjInfo,
Host: "Internal: [Replication]",
EventName: eventName,
})
}
sendEvent(eventArgs{
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
EventName: eventName,
})
}
func getCopyObjMetadata(oi ObjectInfo, dest replication.Destination) map[string]string {
meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined {
if k == xhttp.AmzBucketReplicationStatus {
continue
}
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue
}
if equals(k, xhttp.AmzBucketReplicationStatus) {
continue
}
// https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
continue
}
meta[k] = v
}
if oi.ContentEncoding != "" {
meta[xhttp.ContentEncoding] = oi.ContentEncoding
}
if oi.ContentType != "" {
meta[xhttp.ContentType] = oi.ContentType
}
tag, err := tags.ParseObjectTags(oi.UserTags)
if err != nil {
return nil
}
if tag != nil {
meta[xhttp.AmzObjectTagging] = tag.String()
if oi.UserTags != "" {
meta[xhttp.AmzObjectTagging] = oi.UserTags
meta[xhttp.AmzTagDirective] = "REPLACE"
}
sc := dest.StorageClass
if sc == "" {
sc = oi.StorageClass
}
meta[xhttp.AmzStorageClass] = sc
if oi.UserTags != "" {
meta[xhttp.AmzObjectTagging] = oi.UserTags
if sc != "" {
meta[xhttp.AmzStorageClass] = sc
}
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
meta[xhttp.MinIOSourceETag] = oi.ETag
meta[xhttp.MinIOSourceMTime] = oi.ModTime.Format(time.RFC3339Nano)
meta[xhttp.AmzBucketReplicationStatus] = replication.Replica.String()
return meta
}
func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions) {
type caseInsensitiveMap map[string]string
// Lookup map entry case insensitively.
func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
if len(m) == 0 {
return "", false
}
for _, k := range []string{
key,
strings.ToLower(key),
http.CanonicalHeaderKey(key),
} {
v, ok := m[k]
if ok {
return v, ok
}
}
return "", false
}
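
Lookup exists because plain Go map access is case-sensitive while the metadata keys it is fed arrive in mixed forms. A quick standalone illustration of why the raw, lower-cased and canonical-header spellings are all tried (stdlib only):

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	meta := map[string]string{"content-language": "en"}

	_, ok := meta["Content-Language"] // direct lookup misses
	fmt.Println(ok)                   // false

	// Trying the raw, lower-cased and canonical forms (as Lookup does) hits.
	candidates := []string{
		"Content-Language",
		"content-language",
		http.CanonicalHeaderKey("Content-Language"),
	}
	for _, k := range candidates {
		if v, ok := meta[k]; ok {
			fmt.Println("found:", v) // found: en
			break
		}
	}
}
```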
func putReplicationOpts(ctx context.Context, dest replication.Destination, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
@@ -342,54 +404,58 @@ func putReplicationOpts(ctx context.Context, dest replication.Destination, objIn
}
meta[k] = v
}
tag, err := tags.ParseObjectTags(objInfo.UserTags)
if err != nil {
return
}
sc := dest.StorageClass
if sc == "" {
sc = objInfo.StorageClass
}
putOpts = miniogo.PutObjectOptions{
UserMetadata: meta,
UserTags: tag.ToMap(),
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
SourceVersionID: objInfo.VersionID,
ReplicationStatus: miniogo.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
}
if lang, ok := objInfo.UserDefined[xhttp.ContentLanguage]; ok {
if objInfo.UserTags != "" {
tag, _ := tags.ParseObjectTags(objInfo.UserTags)
if tag != nil {
putOpts.UserTags = tag.ToMap()
}
}
lkMap := caseInsensitiveMap(objInfo.UserDefined)
if lang, ok := lkMap.Lookup(xhttp.ContentLanguage); ok {
putOpts.ContentLanguage = lang
}
if disp, ok := objInfo.UserDefined[xhttp.ContentDisposition]; ok {
if disp, ok := lkMap.Lookup(xhttp.ContentDisposition); ok {
putOpts.ContentDisposition = disp
}
if cc, ok := objInfo.UserDefined[xhttp.CacheControl]; ok {
if cc, ok := lkMap.Lookup(xhttp.CacheControl); ok {
putOpts.CacheControl = cc
}
if mode, ok := objInfo.UserDefined[xhttp.AmzObjectLockMode]; ok {
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
rmode := miniogo.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := objInfo.UserDefined[xhttp.AmzObjectLockRetainUntilDate]; ok {
rdate, err := time.Parse(time.RFC3339Nano, retainDateStr)
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
rdate, err := time.Parse(time.RFC3339, retainDateStr)
if err != nil {
return
return putOpts, err
}
putOpts.RetainUntilDate = rdate
}
if lhold, ok := objInfo.UserDefined[xhttp.AmzObjectLockLegalHold]; ok {
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
}
if crypto.S3.IsEncrypted(objInfo.UserDefined) {
putOpts.ServerSideEncryption = encrypt.NewSSE()
}
return
}
@@ -401,6 +467,16 @@ const (
replicateAll replicationAction = "all"
)
// matches k1 with all keys, returns 'true' if one of them matches
func equals(k1 string, keys ...string) bool {
for _, k2 := range keys {
if strings.ToLower(k1) == strings.ToLower(k2) {
return true
}
}
return false
}
// returns replicationAction by comparing metadata between source and target
func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationAction {
// needs full replication
@@ -408,83 +484,153 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo) replicationActio
oi1.VersionID != oi2.VersionID ||
oi1.Size != oi2.Size ||
oi1.DeleteMarker != oi2.IsDeleteMarker ||
!oi1.ModTime.Equal(oi2.LastModified) {
oi1.ModTime.Unix() != oi2.LastModified.Unix() {
return replicateAll
}
if oi1.ContentType != oi2.ContentType {
return replicateMetadata
}
if oi1.ContentEncoding != "" {
enc, ok := oi2.Metadata[xhttp.ContentEncoding]
if !ok || strings.Join(enc, "") != oi1.ContentEncoding {
if !ok {
enc, ok = oi2.Metadata[strings.ToLower(xhttp.ContentEncoding)]
if !ok {
return replicateMetadata
}
}
if strings.Join(enc, ",") != oi1.ContentEncoding {
return replicateMetadata
}
}
t, _ := tags.ParseObjectTags(oi1.UserTags)
if !reflect.DeepEqual(oi2.UserTags, t.ToMap()) {
return replicateMetadata
}
// Compare only necessary headers
compareKeys := []string{
"Expires",
"Cache-Control",
"Content-Language",
"Content-Disposition",
"X-Amz-Object-Lock-Mode",
"X-Amz-Object-Lock-Retain-Until-Date",
"X-Amz-Object-Lock-Legal-Hold",
"X-Amz-Website-Redirect-Location",
"X-Amz-Meta-",
}
// compare metadata on both maps to see if meta is identical
for k1, v1 := range oi1.UserDefined {
if v2, ok := oi2.UserMetadata[k1]; ok && v1 == v2 {
continue
compareMeta1 := make(map[string]string)
for k, v := range oi1.UserDefined {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if v2, ok := oi2.Metadata[k1]; ok && v1 == strings.Join(v2, "") {
continue
if found {
compareMeta1[strings.ToLower(k)] = v
}
}
compareMeta2 := make(map[string]string)
for k, v := range oi2.Metadata {
var found bool
for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
continue
}
found = true
break
}
if found {
compareMeta2[strings.ToLower(k)] = strings.Join(v, ",")
}
}
if !reflect.DeepEqual(compareMeta1, compareMeta2) {
return replicateMetadata
}
for k1, v1 := range oi2.UserMetadata {
if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 {
return replicateMetadata
}
}
for k1, v1slc := range oi2.Metadata {
v1 := strings.Join(v1slc, "")
if k1 == xhttp.ContentEncoding { //already compared
continue
}
if v2, ok := oi1.UserDefined[k1]; !ok || v1 != v2 {
return replicateMetadata
}
}
t, _ := tags.MapToObjectTags(oi2.UserTags)
if t.String() != oi1.UserTags {
return replicateMetadata
}
return replicateNone
}
// replicateObject replicates the specified version of the object to destination bucket
// The source object is then updated to reflect the replication status.
func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLayer) {
z, ok := objectAPI.(*erasureServerPools)
if !ok {
return
}
bucket := objInfo.Bucket
object := objInfo.Name
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(ctx, cfg.RoleArn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, cfg.RoleArn))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, readLock, ObjectOptions{
VersionID: objInfo.VersionID,
})
if err != nil {
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
logger.LogIf(ctx, err)
return
}
defer gr.Close() // hold read lock for entire transaction
objInfo = gr.ObjInfo
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
gr.Close()
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
dest := cfg.GetDestination()
if dest.Bucket == "" {
gr.Close()
logger.LogIf(ctx, fmt.Errorf("Unable to replicate object %s(%s), bucket is empty", objInfo.Name, objInfo.VersionID))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
@@ -497,50 +643,79 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
if err == nil {
rtype = getReplicationAction(objInfo, oi)
if rtype == replicateNone {
gr.Close()
// An object with the same VersionID already exists; replication kicked
// off by PutObject might have already completed.
return
}
}
replicationStatus := replication.Completed
// use core client to avoid doing multipart on PUT
c := &miniogo.Core{Client: tgt.Client}
if rtype != replicateAll {
gr.Close()
// replicate metadata for object tagging/copy with metadata replacement
dstOpts := miniogo.PutObjectOptions{Internal: miniogo.AdvancedPutOptions{SourceVersionID: objInfo.VersionID}}
c := &miniogo.Core{Client: tgt.Client}
_, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), dstOpts)
if err != nil {
srcOpts := miniogo.CopySrcOptions{
Bucket: dest.Bucket,
Object: object,
VersionID: objInfo.VersionID}
dstOpts := miniogo.PutObjectOptions{
Internal: miniogo.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and server-side replication
}}
if _, err = c.CopyObject(ctx, dest.Bucket, object, dest.Bucket, object, getCopyObjMetadata(objInfo, dest), srcOpts, dstOpts); err != nil {
replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate metadata for object %s/%s(%s): %s", bucket, objInfo.Name, objInfo.VersionID, err))
}
} else {
target, err := globalBucketMetadataSys.GetBucketTarget(bucket, cfg.RoleArn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%s", bucket, cfg.RoleArn, err))
gr.Close()
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
putOpts, err := putReplicationOpts(ctx, dest, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to get target for replication bucket:%s cfg:%s err:%w", bucket, cfg.RoleArn, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
Object: objInfo,
Host: "Internal: [Replication]",
})
return
}
putOpts := putReplicationOpts(ctx, dest, objInfo)
// Setup bandwidth throttling
peers, _ := globalEndpoints.peers()
totalNodesCount := len(peers)
if totalNodesCount == 0 {
totalNodesCount = 1 // For standalone erasure coding
}
b := target.BandwidthLimit / int64(totalNodesCount)
var headerSize int
for k, v := range putOpts.Header() {
headerSize += len(k) + len(v)
}
// r takes over closing gr.
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, objInfo.Bucket, objInfo.Name, gr, headerSize, b, target.BandwidthLimit)
_, err = tgt.PutObject(ctx, dest.Bucket, object, r, size, putOpts)
if err != nil {
replicationStatus = replication.Failed
opts := &bandwidth.MonitorReaderOptions{
Bucket: objInfo.Bucket,
Object: objInfo.Name,
HeaderSize: headerSize,
BandwidthBytesPerSec: target.BandwidthLimit / int64(totalNodesCount),
ClusterBandwidth: target.BandwidthLimit,
}
r := bandwidth.NewMonitoredReader(ctx, globalBucketMonitor, gr, opts)
if _, err = c.PutObject(ctx, dest.Bucket, object, r, size, "", "", putOpts); err != nil {
replicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("Unable to replicate for object %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
r.Close()
}
objInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = replicationStatus.String()
@@ -549,7 +724,6 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
}
// FIXME: add support for missing replication events
// - event.ObjectReplicationNotTracked
// - event.ObjectReplicationMissedThreshold
// - event.ObjectReplicationReplicatedAfterThreshold
var eventName = event.ObjectReplicationComplete
@@ -557,16 +731,17 @@ func replicateObject(ctx context.Context, objInfo ObjectInfo, objectAPI ObjectLa
eventName = event.ObjectReplicationFailed
}
objInfo.metadataOnly = true // Perform only metadata updates.
objInfo, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{
VersionID: objInfo.VersionID,
})
// This lower level implementation is necessary to avoid write locks from CopyObject.
poolIdx, err := z.getPoolIdx(ctx, bucket, object, objInfo.Size)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
} else {
if err = z.serverPools[poolIdx].getHashedSet(object).updateObjectMeta(ctx, bucket, object, objInfo.UserDefined, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s/%s(%s): %w", bucket, objInfo.Name, objInfo.VersionID, err))
}
}
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
@@ -603,91 +778,102 @@ type DeletedObjectVersionInfo struct {
DeletedObject
Bucket string
}
type replicationState struct {
// add future metrics here
replicaCh chan ObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
}
func (r *replicationState) queueReplicaTask(oi ObjectInfo) {
if r == nil {
return
}
select {
case r.replicaCh <- oi:
default:
}
}
func (r *replicationState) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
if r == nil {
return
}
select {
case r.replicaDeleteCh <- doi:
default:
}
}
var (
globalReplicationState *replicationState
// TODO: keep this conservative for now and use only half the
// available CPUs for replication; this can be tuned in the future.
globalReplicationConcurrent = runtime.GOMAXPROCS(0) / 2
globalReplicationPool *ReplicationPool
)
func newReplicationState() *replicationState {
// fix minimum concurrent replication to 1 for single CPU setup
if globalReplicationConcurrent == 0 {
globalReplicationConcurrent = 1
}
rs := &replicationState{
replicaCh: make(chan ObjectInfo, 10000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
}
go func() {
<-GlobalContext.Done()
close(rs.replicaCh)
close(rs.replicaDeleteCh)
}()
return rs
}
// ReplicationPool describes replication pool
type ReplicationPool struct {
mu sync.Mutex
size int
replicaCh chan ObjectInfo
replicaDeleteCh chan DeletedObjectVersionInfo
killCh chan struct{}
wg sync.WaitGroup
ctx context.Context
objLayer ObjectLayer
}
// addWorker creates a new worker to process tasks
func (r *replicationState) addWorker(ctx context.Context, objectAPI ObjectLayer) {
// Add a new worker.
// NewReplicationPool creates a pool of replication workers of specified size
func NewReplicationPool(ctx context.Context, o ObjectLayer, sz int) *ReplicationPool {
pool := &ReplicationPool{
replicaCh: make(chan ObjectInfo, 10000),
replicaDeleteCh: make(chan DeletedObjectVersionInfo, 10000),
ctx: ctx,
objLayer: o,
}
go func() {
for {
select {
case <-ctx.Done():
return
case oi, ok := <-r.replicaCh:
if !ok {
return
}
replicateObject(ctx, oi, objectAPI)
case doi, ok := <-r.replicaDeleteCh:
if !ok {
return
}
replicateDelete(ctx, doi, objectAPI)
}
}
<-ctx.Done()
close(pool.replicaCh)
close(pool.replicaDeleteCh)
}()
pool.Resize(sz)
return pool
}
// AddWorker adds a replication worker to the pool
func (p *ReplicationPool) AddWorker() {
defer p.wg.Done()
for {
select {
case <-p.ctx.Done():
return
case oi, ok := <-p.replicaCh:
if !ok {
return
}
replicateObject(p.ctx, oi, p.objLayer)
case doi, ok := <-p.replicaDeleteCh:
if !ok {
return
}
replicateDelete(p.ctx, doi, p.objLayer)
case <-p.killCh:
return
}
}
}
// Resize resizes the replication pool to the specified size
func (p *ReplicationPool) Resize(n int) {
p.mu.Lock()
defer p.mu.Unlock()
for p.size < n {
p.size++
p.wg.Add(1)
go p.AddWorker()
}
for p.size > n {
p.size--
go func() { p.killCh <- struct{}{} }()
}
}
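NewReplicationPool and Resize together implement a resizable worker pool: growing spawns additional goroutines, and shrinking sends one signal per excess worker on a kill channel that every worker also selects on. A stripped-down sketch of the same grow/shrink pattern with a generic task type (illustrative names, not the replication types):

```
package main

import (
    "fmt"
    "sync"
)

// workerPool is a minimal resizable pool: Resize starts extra workers
// to grow and signals killCh once per worker to shrink.
type workerPool struct {
    mu     sync.Mutex
    size   int
    tasks  chan string
    killCh chan struct{}
    wg     sync.WaitGroup
}

func newWorkerPool(n int) *workerPool {
    p := &workerPool{tasks: make(chan string, 100), killCh: make(chan struct{})}
    p.Resize(n)
    return p
}

func (p *workerPool) worker() {
    defer p.wg.Done()
    for {
        select {
        case t, ok := <-p.tasks:
            if !ok {
                return // queue closed and drained
            }
            fmt.Println("replicated", t)
        case <-p.killCh:
            return // asked to shrink
        }
    }
}

// Resize adjusts the number of running workers to n.
func (p *workerPool) Resize(n int) {
    p.mu.Lock()
    defer p.mu.Unlock()
    for p.size < n {
        p.size++
        p.wg.Add(1)
        go p.worker()
    }
    for p.size > n {
        p.size--
        p.killCh <- struct{}{} // exactly one worker exits per signal
    }
}

func main() {
    p := newWorkerPool(4)
    p.tasks <- "bucket/object-v1"
    p.Resize(1) // three workers receive a kill signal and exit
    close(p.tasks)
    p.wg.Wait()
}
```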
func (p *ReplicationPool) queueReplicaTask(oi ObjectInfo) {
if p == nil {
return
}
select {
case p.replicaCh <- oi:
default:
}
}
func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectVersionInfo) {
if p == nil {
return
}
select {
case p.replicaDeleteCh <- doi:
default:
}
}
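Both queue helpers above rely on a select with a default branch, so the request path never blocks on replication: if the buffered channel is full, the task is simply dropped rather than stalling the caller. The same best-effort enqueue in isolation:

```
package main

import "fmt"

// tryEnqueue attempts a non-blocking send; it reports whether the task
// was accepted or dropped because the queue is full.
func tryEnqueue(queue chan<- int, task int) bool {
    select {
    case queue <- task:
        return true
    default:
        return false // queue full: drop instead of blocking the caller
    }
}

func main() {
    q := make(chan int, 2)
    for i := 1; i <= 3; i++ {
        fmt.Printf("task %d accepted: %v\n", i, tryEnqueue(q, i))
    }
    // Tasks 1 and 2 are accepted, task 3 is dropped.
}
```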
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {
if globalReplicationState == nil {
return
}
// Start with globalReplicationConcurrent.
for i := 0; i < globalReplicationConcurrent; i++ {
globalReplicationState.addWorker(ctx, objectAPI)
}
globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationWorkers())
}
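The full-object replication path earlier in this file budgets bandwidth per node: the target's cluster-wide limit is divided by the number of peer nodes (with a floor of one for standalone deployments), and an estimate of the request-header bytes is charged to the monitored reader as overhead. A rough, illustrative sketch of that arithmetic, not the bandwidth package API:

```
package main

import (
    "fmt"
    "net/http"
)

// perNodeBudget splits a cluster-wide bandwidth limit (bytes/sec)
// evenly across nodes, treating a standalone setup as one node.
func perNodeBudget(clusterLimit int64, nodes int) int64 {
    if nodes == 0 {
        nodes = 1
    }
    return clusterLimit / int64(nodes)
}

// headerOverhead approximates the bytes spent on request headers so
// they can be accounted against the bandwidth budget.
func headerOverhead(h http.Header) int {
    var n int
    for k, vals := range h {
        for _, v := range vals {
            n += len(k) + len(v)
        }
    }
    return n
}

func main() {
    h := http.Header{}
    h.Set("X-Amz-Meta-Color", "blue")
    fmt.Println(perNodeBudget(100<<20, 4)) // 26214400 bytes/sec per node
    fmt.Println(headerOverhead(h))         // approximate header bytes charged as overhead
}
```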
// get Reader from replication target if active-active replication is in place and
@@ -714,8 +900,11 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
return nil, false
}
}
// Make sure to match ETag when proxying.
if err = gopts.SetMatchETag(oi.ETag); err != nil {
return nil, false
}
c := miniogo.Core{Client: tgt.Client}
obj, _, _, err := c.GetObject(ctx, bucket, object, gopts)
if err != nil {
return nil, false
@@ -726,6 +915,7 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
if err != nil {
return nil, false
}
reader.ObjInfo = oi.Clone()
return reader, true
}
@@ -781,6 +971,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
if err != nil {
return nil, oi, false, err
}
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi = ObjectInfo{
Bucket: bucket,
@@ -795,12 +986,18 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, opts Objec
Expires: objInfo.Expires,
StorageClass: objInfo.StorageClass,
ReplicationStatus: replication.StatusType(objInfo.ReplicationStatus),
UserDefined: cloneMSS(objInfo.UserMetadata),
UserTags: tags.String(),
}
if ce, ok := oi.UserDefined[xhttp.ContentEncoding]; ok {
oi.UserDefined = make(map[string]string, len(objInfo.Metadata))
for k, v := range objInfo.Metadata {
oi.UserDefined[k] = v[0]
}
ce, ok := oi.UserDefined[xhttp.ContentEncoding]
if !ok {
ce, ok = oi.UserDefined[strings.ToLower(xhttp.ContentEncoding)]
}
if ok {
oi.ContentEncoding = ce
delete(oi.UserDefined, xhttp.ContentEncoding)
}
return tgt, oi, true, nil
}
@@ -816,7 +1013,7 @@ func scheduleReplication(ctx context.Context, objInfo ObjectInfo, o ObjectLayer,
if sync {
replicateObject(ctx, objInfo, o)
} else {
globalReplicationState.queueReplicaTask(objInfo)
globalReplicationPool.queueReplicaTask(objInfo)
}
}
@@ -824,6 +1021,6 @@ func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectVersionInfo,
if sync {
replicateDelete(ctx, dv, o)
} else {
globalReplicationState.queueReplicaDeleteTask(dv)
globalReplicationPool.queueReplicaDeleteTask(dv)
}
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"net/http"
@@ -32,7 +33,6 @@ import (
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/madmin"
sha256 "github.com/minio/sha256-simd"
)
const (
@@ -100,7 +100,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
}
if tgt.Type == madmin.ReplicationService {
if !globalIsErasure {
@@ -111,7 +111,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
}
vcfg, err := clnt.GetBucketVersioning(ctx, tgt.TargetBucket)
if err != nil {
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -124,7 +124,7 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return BucketRemoteTargetNotFound{Bucket: tgt.TargetBucket}
}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket}
return BucketRemoteConnectionErr{Bucket: tgt.TargetBucket, Err: err}
}
if vcfg.Status != string(versioning.Enabled) {
return BucketRemoteTargetNotVersioned{Bucket: tgt.TargetBucket}
@@ -362,6 +362,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
Creds: creds,
Secure: tcfg.Secure,
Region: tcfg.Region,
Transport: getRemoteTargetInstanceTransport,
})
if err != nil {

View File

@@ -50,10 +50,14 @@ import (
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
func init() {
rand.Seed(time.Now().UTC().UnixNano())
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
rand.Seed(time.Now().UTC().UnixNano())
// Inject into config package.
config.Logger.Info = logger.Info
config.Logger.LogIf = logger.LogIf
globalDNSCache = xhttp.NewDNSCache(10*time.Second, 10*time.Second, logger.LogOnceIf)
@@ -69,7 +73,6 @@ func init() {
},
})
globalReplicationState = newReplicationState()
globalTransitionState = newTransitionState()
console.SetColor("Debug", color.New())

View File

@@ -20,6 +20,8 @@ import (
"bytes"
"context"
"errors"
"io/ioutil"
"net/http"
"github.com/minio/minio/pkg/hash"
)
@@ -27,9 +29,9 @@ import (
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
r, err := objAPI.GetObjectNInfo(ctx, minioMetaBucket, configFile, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
@@ -37,13 +39,16 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
return nil, err
}
defer r.Close()
// Return config not found on empty content.
if buffer.Len() == 0 {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
if len(buf) == 0 {
return nil, errConfigNotFound
}
return buffer.Bytes(), nil
return buf, nil
}
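readConfig now streams the object through GetObjectNInfo, reads it fully, and maps both a missing object and empty content to errConfigNotFound. The read-fully-then-validate part of that pattern in isolation, using a plain io.ReadCloser rather than the ObjectLayer API:

```
package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
)

var errConfigNotFound = errors.New("config file not found")

// readAllOrNotFound drains r and maps empty content to a "not found"
// error, mirroring the behaviour sketched above.
func readAllOrNotFound(r io.ReadCloser) ([]byte, error) {
    defer r.Close()
    buf, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, err
    }
    if len(buf) == 0 {
        return nil, errConfigNotFound
    }
    return buf, nil
}

func main() {
    _, err := readAllOrNotFound(ioutil.NopCloser(bytes.NewReader(nil)))
    fmt.Println(err) // config file not found

    b, _ := readAllOrNotFound(ioutil.NopCloser(bytes.NewReader([]byte(`{"version":"1"}`))))
    fmt.Println(string(b))
}
```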
type objectDeleter interface {
@@ -59,12 +64,12 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
if err != nil {
return err
}
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader), ObjectOptions{MaxParity: true})
return err
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"context"
"crypto/tls"
"fmt"
"strings"
"sync"
@@ -26,7 +27,6 @@ import (
"github.com/minio/minio/cmd/config/api"
"github.com/minio/minio/cmd/config/cache"
"github.com/minio/minio/cmd/config/compress"
"github.com/minio/minio/cmd/config/crawler"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/config/etcd"
"github.com/minio/minio/cmd/config/heal"
@@ -34,6 +34,7 @@ import (
"github.com/minio/minio/cmd/config/identity/openid"
"github.com/minio/minio/cmd/config/notify"
"github.com/minio/minio/cmd/config/policy/opa"
"github.com/minio/minio/cmd/config/scanner"
"github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
@@ -59,7 +60,7 @@ func initHelp() {
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.AuditWebhookSubSys: logger.DefaultAuditKVS,
config.HealSubSys: heal.DefaultKVS,
config.CrawlerSubSys: crawler.DefaultKVS,
config.ScannerSubSys: scanner.DefaultKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
@@ -116,8 +117,8 @@ func initHelp() {
Description: "manage object healing frequency and bitrot verification checks",
},
config.HelpKV{
Key: config.CrawlerSubSys,
Description: "manage crawling for usage calculation, lifecycle, healing and more",
Key: config.ScannerSubSys,
Description: "manage namespace scanning for usage calculation, lifecycle, healing and more",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
@@ -199,7 +200,7 @@ func initHelp() {
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.HealSubSys: heal.Help,
config.CrawlerSubSys: crawler.Help,
config.ScannerSubSys: scanner.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
@@ -273,11 +274,11 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
}
}
if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
if _, err = heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err
}
if _, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default]); err != nil {
if _, err = scanner.LookupConfig(s[config.ScannerSubSys][config.Default]); err != nil {
return err
}
@@ -295,7 +296,10 @@ func validateConfig(s config.Config, setDriveCounts []int) error {
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
&tls.Config{
RootCAs: globalRootCAs,
}, defaultDialTimeout)())
if err != nil {
return err
}
@@ -471,7 +475,10 @@ func lookupConfigs(s config.Config, setDriveCounts []int) {
}
}
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), newCustomHTTPTransportWithHTTP2(
&tls.Config{
RootCAs: globalRootCAs,
}, defaultDialTimeout)())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
@@ -546,7 +553,7 @@ If you need help to migrate smoothly visit: https://min.io/pricing`
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
http.WithTransport(NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)),
),
); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize audit HTTP target: %w", err))
@@ -599,10 +606,10 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
return fmt.Errorf("Unable to apply heal config: %w", err)
}
// Crawler
crawlerCfg, err := crawler.LookupConfig(s[config.CrawlerSubSys][config.Default])
// Scanner
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply crawler config: %w", err)
return fmt.Errorf("Unable to apply scanner config: %w", err)
}
// Apply configurations.
@@ -617,7 +624,7 @@ func applyDynamicConfig(ctx context.Context, objAPI ObjectLayer, s config.Config
globalHealConfig = healCfg
globalHealConfigMu.Unlock()
logger.LogIf(ctx, crawlerSleeper.Update(crawlerCfg.Delay, crawlerCfg.MaxWait))
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
// Update all dynamic config values in memory.
globalServerConfigMu.Lock()

View File

@@ -29,14 +29,14 @@ import (
// API sub-system constants
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"
apiListQuorum = "list_quorum"
apiExtendListCacheLife = "extend_list_cache_life"
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiClusterDeadline = "cluster_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
apiRemoteTransportDeadline = "remote_transport_deadline"
apiListQuorum = "list_quorum"
apiExtendListCacheLife = "extend_list_cache_life"
apiReplicationWorkers = "replication_workers"
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE"
@@ -45,6 +45,7 @@ const (
EnvAPIListQuorum = "MINIO_API_LIST_QUORUM"
EnvAPIExtendListCacheLife = "MINIO_API_EXTEND_LIST_CACHE_LIFE"
EnvAPISecureCiphers = "MINIO_API_SECURE_CIPHERS"
EnvAPIReplicationWorkers = "MINIO_API_REPLICATION_WORKERS"
)
// Deprecated key and ENVs
@@ -78,12 +79,16 @@ var (
},
config.KV{
Key: apiListQuorum,
Value: "optimal",
Value: "strict",
},
config.KV{
Key: apiExtendListCacheLife,
Value: "0s",
},
config.KV{
Key: apiReplicationWorkers,
Value: "100",
},
}
)
@@ -96,6 +101,7 @@ type Config struct {
RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"`
ListQuorum string `json:"list_strict_quorum"`
ExtendListLife time.Duration `json:"extend_list_cache_life"`
ReplicationWorkers int `json:"replication_workers"`
}
// UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON.
@@ -173,6 +179,15 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
return cfg, err
}
replicationWorkers, err := strconv.Atoi(env.Get(EnvAPIReplicationWorkers, kvs.Get(apiReplicationWorkers)))
if err != nil {
return cfg, err
}
if replicationWorkers <= 0 {
return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Minimum number of replication workers should be 1")
}
return Config{
RequestsMax: requestsMax,
RequestsDeadline: requestsDeadline,
@@ -181,5 +196,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
RemoteTransportDeadline: remoteTransportDeadline,
ListQuorum: listQuorum,
ExtendListLife: listLife,
ReplicationWorkers: replicationWorkers,
}, nil
}

View File

@@ -45,5 +45,11 @@ var (
Optional: true,
Type: "duration",
},
config.HelpKV{
Key: apiReplicationWorkers,
Description: `set the number of replication workers, defaults to 100`,
Optional: true,
Type: "number",
},
}
)

View File

@@ -40,19 +40,13 @@ var (
},
config.HelpKV{
Key: Exclude,
Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
Description: `exclude cache for the following patterns e.g. "bucket/*.tmp,*.exe"`,
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
config.HelpKV{
Key: After,
Description: `minimum accesses before caching an object`,
Description: `minimum number of accesses before caching an object`,
Optional: true,
Type: "number",
},
@@ -80,5 +74,11 @@ var (
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)

View File

@@ -23,6 +23,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"io/ioutil"
"github.com/minio/minio/pkg/env"
@@ -113,3 +114,12 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
}
return cert, nil
}
// EnsureCertAndKey checks if both client certificate and key paths are provided
func EnsureCertAndKey(ClientCert, ClientKey string) error {
if (ClientCert != "" && ClientKey == "") ||
(ClientCert == "" && ClientKey != "") {
return errors.New("cert and key must be specified as a pair")
}
return nil
}

View File

@@ -45,7 +45,7 @@ const (
// Include-list for compression.
DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
DefaultMimeTypes = "text/*,application/json,application/xml"
DefaultMimeTypes = "text/*,application/json,application/xml,binary/octet-stream"
)
// DefaultKVS - default KV config for compression settings

View File

@@ -77,6 +77,7 @@ const (
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
HealSubSys = "heal"
ScannerSubSys = "scanner"
CrawlerSubSys = "crawler"
// Add new constants here if you add new fields to config.
@@ -114,7 +115,7 @@ var SubSystems = set.CreateStringSet(
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
CrawlerSubSys,
ScannerSubSys,
HealSubSys,
NotifyAMQPSubSys,
NotifyESSubSys,
@@ -132,7 +133,7 @@ var SubSystems = set.CreateStringSet(
var SubSystemsDynamic = set.CreateStringSet(
APISubSys,
CompressionSubSys,
CrawlerSubSys,
ScannerSubSys,
HealSubSys,
)
@@ -151,7 +152,7 @@ var SubSystemsSingleTargets = set.CreateStringSet([]string{
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
HealSubSys,
CrawlerSubSys,
ScannerSubSys,
}...)
// Constant separators
@@ -462,6 +463,13 @@ func LookupWorm() (bool, error) {
return ParseBool(env.Get(EnvWorm, EnableOff))
}
// Carries all the renamed sub-systems from their
// previously known names
var renamedSubsys = map[string]string{
CrawlerSubSys: ScannerSubSys,
// Add future sub-system renames
}
// Merge - merges a new config with all the
// missing values for default configs,
// returns a config.
@@ -477,9 +485,21 @@ func (c Config) Merge() Config {
}
}
if _, ok := cp[subSys]; !ok {
// A config subsystem was removed or server was downgraded.
Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
continue
rnSubSys, ok := renamedSubsys[subSys]
if !ok {
// A config subsystem was removed or server was downgraded.
Logger.Info("config: ignoring unknown subsystem config %q\n", subSys)
continue
}
// Copy over settings from previous sub-system
// to newly renamed sub-system
for _, kv := range cp[rnSubSys][Default] {
_, ok := c[subSys][tgt].Lookup(kv.Key)
if !ok {
ckvs.Set(kv.Key, kv.Value)
}
}
subSys = rnSubSys
}
cp[subSys][tgt] = ckvs
}
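The Merge change stops discarding configuration for sub-systems that were merely renamed (crawler became scanner): settings stored under the legacy name are re-homed under the new name, with the new sub-system's defaults filling in anything the legacy section never set. A simplified, stand-alone sketch of that rule using plain maps rather than config.Config:

```
package main

import "fmt"

// migrateRenamed re-homes settings stored under a legacy section name to
// its successor, filling in defaults for keys the legacy section did not set.
func migrateRenamed(stored map[string]string, defaults map[string]string) map[string]string {
    out := map[string]string{}
    for k, v := range defaults {
        out[k] = v
    }
    for k, v := range stored {
        out[k] = v // previously stored values win over defaults
    }
    return out
}

func main() {
    legacyCrawler := map[string]string{"delay": "5.0"}                          // stored under "crawler"
    scannerDefaults := map[string]string{"delay": "10.0", "max_wait": "15s"}    // defaults for "scanner"
    fmt.Println(migrateRenamed(legacyCrawler, scannerDefaults))                 // map[delay:5.0 max_wait:15s]
}
```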

View File

@@ -23,21 +23,23 @@ const (
// Top level common ENVs
const (
EnvAccessKey = "MINIO_ACCESS_KEY"
EnvSecretKey = "MINIO_SECRET_KEY"
EnvRootUser = "MINIO_ROOT_USER"
EnvRootPassword = "MINIO_ROOT_PASSWORD"
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
EnvBrowser = "MINIO_BROWSER"
EnvDomain = "MINIO_DOMAIN"
EnvRegionName = "MINIO_REGION_NAME"
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvArgs = "MINIO_ARGS"
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
EnvAccessKey = "MINIO_ACCESS_KEY"
EnvSecretKey = "MINIO_SECRET_KEY"
EnvRootUser = "MINIO_ROOT_USER"
EnvRootPassword = "MINIO_ROOT_PASSWORD"
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
EnvRootUserOld = "MINIO_ROOT_USER_OLD"
EnvRootPasswordOld = "MINIO_ROOT_PASSWORD_OLD"
EnvBrowser = "MINIO_BROWSER"
EnvDomain = "MINIO_DOMAIN"
EnvRegionName = "MINIO_REGION_NAME"
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvArgs = "MINIO_ARGS"
EnvDNSWebhook = "MINIO_DNS_WEBHOOK_ENDPOINT"
EnvLogPosixTimes = "MINIO_LOG_POSIX_TIMES"
EnvLogPosixThresholdInMS = "MINIO_LOG_POSIX_THRESHOLD_MS"
EnvUpdate = "MINIO_UPDATE"

View File

@@ -281,4 +281,10 @@ Example 1:
"",
"Refer to https://docs.min.io/docs/minio-kms-quickstart-guide.html for setting up SSE",
)
ErrInvalidReplicationWorkersValue = newErrFn(
"Invalid value for replication workers",
"",
"MINIO_API_REPLICATION_WORKERS: should be > 0",
)
)

View File

@@ -66,7 +66,7 @@ var (
Help = config.HelpKVS{
config.HelpKV{
Key: Bitrot,
Description: `perform bitrot scan on disks when checking objects during crawl`,
Description: `perform bitrot scan on disks when checking objects during scanning`,
Optional: true,
Type: "on|off",
},

View File

@@ -21,13 +21,14 @@ import (
"crypto/x509"
"errors"
"fmt"
"math/rand"
"net"
"strings"
"time"
ldap "github.com/go-ldap/ldap/v3"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
ldap "gopkg.in/ldap.v3"
)
const (
@@ -58,7 +59,6 @@ type Config struct {
GroupSearchBaseDistName string `json:"groupSearchBaseDN"`
GroupSearchBaseDistNames []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
GroupNameAttribute string `json:"groupNameAttribute"`
// Lookup bind LDAP service account
LookupBindDN string `json:"lookupBindDN"`
@@ -82,7 +82,6 @@ const (
UserDNSearchFilter = "user_dn_search_filter"
UsernameFormat = "username_format"
GroupSearchFilter = "group_search_filter"
GroupNameAttribute = "group_name_attribute"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
@@ -97,7 +96,6 @@ const (
EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN"
EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN"
EnvLookupBindPassword = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD"
@@ -106,6 +104,7 @@ const (
var removedKeys = []string{
"username_search_filter",
"username_search_base_dn",
"group_name_attribute",
}
// DefaultKVS - default config for LDAP config
@@ -131,10 +130,6 @@ var (
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupNameAttribute,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
@@ -180,13 +175,22 @@ func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
for _, entry := range sres.Entries {
// We only queried one attribute,
// so we only look up the first one.
groups = append(groups, entry.Attributes[0].Values...)
groups = append(groups, entry.DN)
}
return groups, nil
}
func (l *Config) lookupBind(conn *ldap.Conn) error {
return conn.Bind(l.LookupBindDN, l.LookupBindPassword)
var err error
if l.LookupBindPassword == "" {
err = conn.UnauthenticatedBind(l.LookupBindDN)
} else {
err = conn.Bind(l.LookupBindDN, l.LookupBindPassword)
}
if ldap.IsErrorWithCode(err, 49) {
return fmt.Errorf("LDAP Lookup Bind user invalid credentials error: %v", err)
}
return err
}
// usernameFormatsBind - Iterates over all given username formats and expects
@@ -208,6 +212,8 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
if errs[i] == nil {
bindDistNames = append(bindDistNames, bindDN)
successCount++
} else if !ldap.IsErrorWithCode(errs[i], 49) {
return "", fmt.Errorf("LDAP Bind request failed with unexpected error: %v", errs[i])
}
}
if successCount == 0 {
@@ -217,7 +223,7 @@ func (l *Config) usernameFormatsBind(conn *ldap.Conn, username, password string)
errStrings = append(errStrings, err.Error())
}
}
outErr := fmt.Sprintf("All username formats failed with: %s", strings.Join(errStrings, "; "))
outErr := fmt.Sprintf("All username formats failed due to invalid credentials: %s", strings.Join(errStrings, "; "))
return "", errors.New(outErr)
}
if successCount > 1 {
@@ -307,12 +313,13 @@ func (l *Config) Bind(username, password string) (string, []string, error) {
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(bindDN), -1)
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
[]string{l.GroupNameAttribute},
nil,
nil,
)
@@ -368,6 +375,38 @@ func (l Config) GetExpiryDuration() time.Duration {
return l.stsExpiryDuration
}
func (l Config) testConnection() error {
conn, err := l.Connect()
if err != nil {
return fmt.Errorf("Error creating connection to LDAP server: %v", err)
}
defer conn.Close()
if l.isUsingLookupBind {
if err = l.lookupBind(conn); err != nil {
return fmt.Errorf("Error connecting as LDAP Lookup Bind user: %v", err)
}
return nil
}
// Generate some random user credentials for username formats mode test.
username := fmt.Sprintf("sometestuser%09d", rand.Int31n(1000000000))
charset := []byte("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
rand.Shuffle(len(charset), func(i, j int) {
charset[i], charset[j] = charset[j], charset[i]
})
password := string(charset[:20])
_, err = l.usernameFormatsBind(conn, username, password)
if err == nil {
// We don't expect to successfully guess a credential in this
// way.
return fmt.Errorf("Unexpected random credentials success for user=%s password=%s", username, password)
} else if strings.HasPrefix(err.Error(), "All username formats failed due to invalid credentials: ") {
return nil
}
return fmt.Errorf("LDAP connection test error: %v", err)
}
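testConnection probes username-format mode without real credentials: it binds with a deliberately random user and password and treats the expected "invalid credentials" rejection as proof that the directory is reachable and answering bind requests; any other outcome is reported as a connection problem. The same idea in a generic form (the bind function here is a hypothetical stand-in, not the go-ldap API):

```
package main

import (
    "errors"
    "fmt"
    "math/rand"
)

var errInvalidCredentials = errors.New("invalid credentials")

// probeByFailedBind checks that a server is reachable by attempting a
// bind with random throwaway credentials: an "invalid credentials"
// rejection proves the server answered; anything else is a problem.
func probeByFailedBind(bind func(user, pass string) error) error {
    user := fmt.Sprintf("sometestuser%09d", rand.Int31n(1_000_000_000))
    pass := fmt.Sprintf("np%d", rand.Int63())
    err := bind(user, pass)
    switch {
    case err == nil:
        return fmt.Errorf("unexpected success for random credentials %q", user)
    case errors.Is(err, errInvalidCredentials):
        return nil // server reachable and rejecting bad logins, as expected
    default:
        return fmt.Errorf("connection test failed: %w", err)
    }
}

func main() {
    // A fake bind that always rejects credentials, standing in for a real directory.
    fakeBind := func(user, pass string) error { return errInvalidCredentials }
    fmt.Println(probeByFailedBind(fakeBind)) // <nil>
}
```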
// Enabled returns if jwks is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(ServerAddr) != ""
@@ -427,14 +466,14 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
// Lookup bind user configuration
lookupBindDN := env.Get(EnvLookupBindDN, kvs.Get(LookupBindDN))
lookupBindPassword := env.Get(EnvLookupBindPassword, kvs.Get(LookupBindPassword))
if lookupBindDN != "" && lookupBindPassword != "" {
if lookupBindDN != "" {
l.LookupBindDN = lookupBindDN
l.LookupBindPassword = lookupBindPassword
l.isUsingLookupBind = true
// User DN search configuration
userDNSearchBaseDN := env.Get(EnvUserDNSearchBaseDN, kvs.Get(UserDNSearchBaseDN))
userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(EnvUserDNSearchFilter))
userDNSearchFilter := env.Get(EnvUserDNSearchFilter, kvs.Get(UserDNSearchFilter))
if userDNSearchFilter == "" || userDNSearchBaseDN == "" {
return l, errors.New("In lookup bind mode, userDN search base DN and userDN search filter are both required")
}
@@ -461,23 +500,22 @@ func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
return l, errors.New("Either Lookup Bind mode or Username Format mode is required.")
}
// Test connection to LDAP server.
if err := l.testConnection(); err != nil {
return l, fmt.Errorf("Connection test for LDAP server failed: %v", err)
}
// Group search params configuration
grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
// Either all group params must be set or none must be set.
var allSet bool
if grpSearchFilter != "" {
if grpSearchNameAttr == "" || grpSearchBaseDN == "" {
return l, errors.New("All group related parameters must be set")
}
allSet = true
if (grpSearchFilter != "" && grpSearchBaseDN == "") || (grpSearchFilter == "" && grpSearchBaseDN != "") {
return l, errors.New("All group related parameters must be set")
}
if allSet {
if grpSearchFilter != "" {
l.GroupSearchFilter = grpSearchFilter
l.GroupNameAttribute = grpSearchNameAttr
l.GroupSearchBaseDistName = grpSearchBaseDN
l.GroupSearchBaseDistNames = strings.Split(l.GroupSearchBaseDistName, dnDelimiter)
}

View File

@@ -68,12 +68,6 @@ var (
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupNameAttribute,
Description: `search attribute for group name e.g. "cn"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupSearchBaseDN,
Description: `";" separated list of group search base DNs e.g. "dc=myldapserver,dc=com"`,

View File

@@ -41,10 +41,6 @@ func SetIdentityLDAP(s config.Config, ldapArgs Config) {
Key: GroupSearchFilter,
Value: ldapArgs.GroupSearchFilter,
},
config.KV{
Key: GroupNameAttribute,
Value: ldapArgs.GroupNameAttribute,
},
config.KV{
Key: GroupSearchBaseDN,
Value: ldapArgs.GroupSearchBaseDistName,

View File

@@ -1,18 +1,18 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !fips
package openid
@@ -22,7 +22,7 @@ import (
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
)
// Specific instances for EC256 and company

View File

@@ -1,18 +1,18 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// MinIO Cloud Storage, (C) 2020 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !fips
package openid
@@ -22,7 +22,7 @@ import (
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
)
// Specific instances for RS256 and company

View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/
package crawler
package scanner
import (
"strconv"
@@ -29,8 +29,10 @@ const (
Delay = "delay"
MaxWait = "max_wait"
EnvDelay = "MINIO_CRAWLER_DELAY"
EnvMaxWait = "MINIO_CRAWLER_MAX_WAIT"
EnvDelay = "MINIO_SCANNER_DELAY"
EnvDelayLegacy = "MINIO_CRAWLER_DELAY"
EnvMaxWait = "MINIO_SCANNER_MAX_WAIT"
EnvMaxWaitLegacy = "MINIO_CRAWLER_MAX_WAIT"
)
// Config represents the heal settings.
@@ -58,7 +60,7 @@ var (
Help = config.HelpKVS{
config.HelpKV{
Key: Delay,
Description: `crawler delay multiplier, defaults to '10.0'`,
Description: `scanner delay multiplier, defaults to '10.0'`,
Optional: true,
Type: "float",
},
@@ -73,14 +75,22 @@ var (
// LookupConfig - lookup config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.CrawlerSubSys, kvs, DefaultKVS); err != nil {
if err = config.CheckValidKeys(config.ScannerSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
cfg.Delay, err = strconv.ParseFloat(env.Get(EnvDelay, kvs.Get(Delay)), 64)
delay := env.Get(EnvDelayLegacy, "")
if delay == "" {
delay = env.Get(EnvDelay, kvs.Get(Delay))
}
cfg.Delay, err = strconv.ParseFloat(delay, 64)
if err != nil {
return cfg, err
}
cfg.MaxWait, err = time.ParseDuration(env.Get(EnvMaxWait, kvs.Get(MaxWait)))
maxWait := env.Get(EnvMaxWaitLegacy, "")
if maxWait == "" {
maxWait = env.Get(EnvMaxWait, kvs.Get(MaxWait))
}
cfg.MaxWait, err = time.ParseDuration(maxWait)
if err != nil {
return cfg, err
}
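The scanner lookup keeps the old MINIO_CRAWLER_* variables working: a legacy variable wins if set, then the new MINIO_SCANNER_* variable, and finally the stored config value. That precedence chain as a small helper (illustrative, not the internal env package):

```
package main

import (
    "fmt"
    "os"
)

// firstNonEmpty returns the first non-empty value, which lets a legacy
// environment variable override its renamed successor, which in turn
// overrides the stored config value.
func firstNonEmpty(values ...string) string {
    for _, v := range values {
        if v != "" {
            return v
        }
    }
    return ""
}

func main() {
    os.Setenv("MINIO_SCANNER_DELAY", "2.0") // new name set, legacy unset
    delay := firstNonEmpty(
        os.Getenv("MINIO_CRAWLER_DELAY"), // legacy name, highest precedence
        os.Getenv("MINIO_SCANNER_DELAY"), // current name
        "10.0",                           // stored/default config value
    )
    fmt.Println(delay) // 2.0
}
```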

View File

@@ -18,6 +18,7 @@ import (
"errors"
"math/rand"
"net/http"
"os"
"reflect"
"strconv"
"strings"
@@ -410,6 +411,9 @@ func NewKMS(cfg KMSConfig) (kms KMS, err error) {
} else if cfg.Vault.Enabled && cfg.Kes.Enabled {
return kms, errors.New("Ambiguous KMS configuration: vault configuration and kes configuration are provided at the same time")
} else if cfg.Vault.Enabled {
if v, ok := os.LookupEnv("MINIO_KMS_VAULT_DEPRECATION"); !ok || v != "off" { // TODO(aead): Remove once Vault support has been removed
return kms, errors.New("Hashicorp Vault is deprecated and will be removed Oct. 2021. To temporarily enable Hashicorp Vault support, set MINIO_KMS_VAULT_DEPRECATION=off")
}
kms, err = NewVault(cfg.Vault)
if err != nil {
return kms, err

View File

@@ -81,8 +81,6 @@ var (
errInvalidInternalIV = Errorf("The internal encryption IV is malformed")
errInvalidInternalSealAlgorithm = Errorf("The internal seal algorithm is invalid and not supported")
errMissingUpdatedKey = Errorf("The key update returned no error but also no sealed key")
)
var (

View File

@@ -181,7 +181,10 @@ func (kes *kesService) CreateKey(keyID string) error { return kes.client.CreateK
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
context := ctx.AppendTo(make([]byte, 0, 128))
context, err := ctx.MarshalText()
if err != nil {
return key, nil, err
}
var plainKey []byte
plainKey, sealedKey, err = kes.client.GenerateDataKey(keyID, context)
@@ -203,7 +206,10 @@ func (kes *kesService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (kes *kesService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
context := ctx.AppendTo(make([]byte, 0, 128))
context, err := ctx.MarshalText()
if err != nil {
return key, err
}
var plainKey []byte
plainKey, err = kes.client.DecryptDataKey(keyID, sealedKey, context)

View File

@@ -19,13 +19,13 @@ import (
"context"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"path"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
)

View File

@@ -19,13 +19,12 @@ import (
"context"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"io"
"sort"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
)
@@ -33,74 +32,29 @@ import (
// associated with a certain object.
type Context map[string]string
// WriteTo writes the context in a canonical from to w.
// It returns the number of bytes and the first error
// encounter during writing to w, if any.
//
// WriteTo sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object to w.
// Sort order is based on the un-escaped keys.
//
// Note that neither keys nor values are escaped for JSON.
func (c Context) WriteTo(w io.Writer) (n int64, err error) {
sortedKeys := make(sort.StringSlice, 0, len(c))
for k := range c {
sortedKeys = append(sortedKeys, k)
}
sort.Sort(sortedKeys)
// MarshalText returns a canonical text representation of
// the Context.
escape := func(s string) string {
buf := bytes.NewBuffer(make([]byte, 0, len(s)))
EscapeStringJSON(buf, s)
return buf.String()
}
nn, err := io.WriteString(w, "{")
if err != nil {
return n + int64(nn), err
}
n += int64(nn)
for i, k := range sortedKeys {
s := fmt.Sprintf("\"%s\":\"%s\",", escape(k), escape(c[k]))
if i == len(sortedKeys)-1 {
s = s[:len(s)-1] // remove last ','
}
nn, err = io.WriteString(w, s)
if err != nil {
return n + int64(nn), err
}
n += int64(nn)
}
nn, err = io.WriteString(w, "}")
return n + int64(nn), err
}
// AppendTo appends the context in a canonical from to dst.
//
// AppendTo sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object to w.
// Sort order is based on the un-escaped keys.
//
// Note that neither keys nor values are escaped for JSON.
func (c Context) AppendTo(dst []byte) (output []byte) {
// MarshalText sorts the context keys and writes the sorted
// key-value pairs as canonical JSON object. The sort order
// is based on the un-escaped keys.
func (c Context) MarshalText() ([]byte, error) {
if len(c) == 0 {
return append(dst, '{', '}')
return []byte{'{', '}'}, nil
}
// out should not escape.
out := bytes.NewBuffer(dst)
// No need to copy+sort
// Pre-allocate a buffer - 128 bytes is an arbitrary
// heuristic value that seems like a good starting size.
var b = bytes.NewBuffer(make([]byte, 0, 128))
if len(c) == 1 {
for k, v := range c {
out.WriteString(`{"`)
EscapeStringJSON(out, k)
out.WriteString(`":"`)
EscapeStringJSON(out, v)
out.WriteString(`"}`)
b.WriteString(`{"`)
EscapeStringJSON(b, k)
b.WriteString(`":"`)
EscapeStringJSON(b, v)
b.WriteString(`"}`)
}
return out.Bytes()
return b.Bytes(), nil
}
sortedKeys := make([]string, 0, len(c))
@@ -109,19 +63,19 @@ func (c Context) AppendTo(dst []byte) (output []byte) {
}
sort.Strings(sortedKeys)
out.WriteByte('{')
b.WriteByte('{')
for i, k := range sortedKeys {
out.WriteByte('"')
EscapeStringJSON(out, k)
out.WriteString(`":"`)
EscapeStringJSON(out, c[k])
out.WriteByte('"')
b.WriteByte('"')
EscapeStringJSON(b, k)
b.WriteString(`":"`)
EscapeStringJSON(b, c[k])
b.WriteByte('"')
if i < len(sortedKeys)-1 {
out.WriteByte(',')
b.WriteByte(',')
}
}
out.WriteByte('}')
return out.Bytes()
b.WriteByte('}')
return b.Bytes(), nil
}
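MarshalText replaces WriteTo/AppendTo with a single canonical encoder: keys are sorted and escaped, so the same context always serializes to identical bytes, which matters because those bytes feed key derivation and KMS requests. A stand-alone sketch of the sorted-keys technique; it uses encoding/json for escaping, whose output differs in detail from the package's EscapeStringJSON but illustrates the same canonical-ordering idea:

```
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "sort"
)

// canonicalJSON writes map entries as a JSON object with keys in sorted
// order, so equal maps always produce identical bytes.
func canonicalJSON(m map[string]string) ([]byte, error) {
    keys := make([]string, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    sort.Strings(keys)

    var b bytes.Buffer
    b.WriteByte('{')
    for i, k := range keys {
        kj, err := json.Marshal(k)
        if err != nil {
            return nil, err
        }
        vj, err := json.Marshal(m[k])
        if err != nil {
            return nil, err
        }
        b.Write(kj)
        b.WriteByte(':')
        b.Write(vj)
        if i < len(keys)-1 {
            b.WriteByte(',')
        }
    }
    b.WriteByte('}')
    return b.Bytes(), nil
}

func main() {
    b, _ := canonicalJSON(map[string]string{"prefix": "2021", "bucket": "photos"})
    fmt.Println(string(b)) // {"bucket":"photos","prefix":"2021"}
}
```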
// KMS represents an active and authenticted connection
@@ -225,9 +179,11 @@ func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte)
if context == nil {
context = Context{}
}
ctxBytes, _ := context.MarshalText()
mac := hmac.New(sha256.New, kms.masterKey[:])
mac.Write([]byte(keyID))
mac.Write(context.AppendTo(make([]byte, 0, 128)))
mac.Write(ctxBytes)
mac.Sum(key[:0])
return key
}
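deriveKey binds the per-object key to both the master key ID and the canonical context bytes via HMAC-SHA256 keyed with the master key; mac.Sum(key[:0]) writes the 32-byte digest directly into the key array. A self-contained version of that derivation (the master key below is a zero value, purely for illustration):

```
package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// deriveKey derives a 32-byte key bound to keyID and context using
// HMAC-SHA256 keyed with the master key.
func deriveKey(masterKey [32]byte, keyID string, context []byte) (key [32]byte) {
    mac := hmac.New(sha256.New, masterKey[:])
    mac.Write([]byte(keyID))
    mac.Write(context)
    mac.Sum(key[:0]) // the 32-byte HMAC fills the key array in place
    return key
}

func main() {
    var master [32]byte // all-zero master key, for illustration only
    ctx := []byte(`{"bucket":"photos"}`)
    k := deriveKey(master, "my-minio-key", ctx)
    fmt.Println(hex.EncodeToString(k[:]))
}
```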

View File

@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"path"
"strings"
"testing"
)
@@ -61,7 +60,7 @@ func TestMasterKeyKMS(t *testing.T) {
}
}
var contextWriteToTests = []struct {
var contextMarshalTextTests = []struct {
Context Context
ExpectedJSON string
}{
@@ -76,43 +75,29 @@ var contextWriteToTests = []struct {
6: {Context: Context{"a": "<>&"}, ExpectedJSON: `{"a":"\u003c\u003e\u0026"}`},
}
func TestContextWriteTo(t *testing.T) {
for i, test := range contextWriteToTests {
var jsonContext strings.Builder
if _, err := test.Context.WriteTo(&jsonContext); err != nil {
t.Errorf("Test %d: Failed to encode context: %v", i, err)
continue
func TestContextMarshalText(t *testing.T) {
for i, test := range contextMarshalTextTests {
text, err := test.Context.MarshalText()
if err != nil {
t.Fatalf("Test %d: Failed to encode context: %v", i, err)
}
if s := jsonContext.String(); s != test.ExpectedJSON {
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
if string(text) != test.ExpectedJSON {
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, string(text), test.ExpectedJSON)
}
}
}
func TestContextAppendTo(t *testing.T) {
for i, test := range contextWriteToTests {
dst := make([]byte, 0, 1024)
dst = test.Context.AppendTo(dst)
if s := string(dst); s != test.ExpectedJSON {
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON)
}
// Append one more
dst = test.Context.AppendTo(dst)
if s := string(dst); s != test.ExpectedJSON+test.ExpectedJSON {
t.Errorf("Test %d: JSON representation differ - got: '%s' want: '%s'", i, s, test.ExpectedJSON+test.ExpectedJSON)
}
}
}
func BenchmarkContext_AppendTo(b *testing.B) {
func BenchmarkContext(b *testing.B) {
tests := []Context{{}, {"bucket": "warp-benchmark-bucket"}, {"0": "1", "-": "2", ".": "#"}, {"34trg": "dfioutr89", "ikjfdghkjf": "jkedfhgfjkhg", "sdfhsdjkh": "if88889", "asddsirfh804": "kjfdshgdfuhgfg78-45604586#$%<>&"}}
for _, test := range tests {
b.Run(fmt.Sprintf("%d-elems", len(test)), func(b *testing.B) {
dst := make([]byte, 0, 1024)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
dst = test.AppendTo(dst[:0])
_, err := test.MarshalText()
if err != nil {
b.Fatal(err)
}
}
})
}

View File

@@ -44,6 +44,15 @@ const (
// MetaDataEncryptionKey is the sealed data encryption key (DEK) received from
// the KMS.
MetaDataEncryptionKey = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key"
// MetaContext is the KMS context provided by a client when encrypting an
// object with SSE-KMS. A client may not send a context, in which case
// MetaContext will not be present.
// MetaContext only contains the bucket/object name if the client explicitly
// added it. However, when decrypting an object the bucket/object name must
// be part of the context. Therefore, the bucket/object name must be added
// to the context, if not present, whenever a decryption is performed.
MetaContext = "X-Minio-Internal-Server-Side-Encryption-Context"
)
// IsMultiPart returns true if the object metadata indicates
@@ -109,26 +118,35 @@ func IsSourceEncrypted(metadata map[string]string) bool {
//
// IsEncrypted only checks whether the metadata contains at least
// one entry indicating SSE-C or SSE-S3.
func IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[MetaIV]; ok {
return true
}
if _, ok := metadata[MetaAlgorithm]; ok {
return true
}
if IsMultiPart(metadata) {
return true
func IsEncrypted(metadata map[string]string) (Type, bool) {
if S3KMS.IsEncrypted(metadata) {
return S3KMS, true
}
if S3.IsEncrypted(metadata) {
return true
return S3, true
}
if SSEC.IsEncrypted(metadata) {
return true
return SSEC, true
}
if S3KMS.IsEncrypted(metadata) {
return true
if IsMultiPart(metadata) {
return nil, true
}
return false
if _, ok := metadata[MetaIV]; ok {
return nil, true
}
if _, ok := metadata[MetaAlgorithm]; ok {
return nil, true
}
if _, ok := metadata[MetaKeyID]; ok {
return nil, true
}
if _, ok := metadata[MetaDataEncryptionKey]; ok {
return nil, true
}
if _, ok := metadata[MetaContext]; ok {
return nil, true
}
return nil, false
}
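IsEncrypted now reports the detected SSE scheme along with the boolean, checking SSE-KMS, then SSE-S3, then SSE-C, and returning a nil Type when only partial encryption markers are present. A hedged usage sketch of how a caller might branch on the result, assuming the cmd/crypto package and its exported S3KMS, S3 and SSEC values are importable as shown in this diff:

```
package main

import (
    "fmt"

    "github.com/minio/minio/cmd/crypto"
)

// describeSSE is an illustrative helper that reports which
// server-side-encryption scheme, if any, object metadata indicates.
func describeSSE(metadata map[string]string) string {
    kind, encrypted := crypto.IsEncrypted(metadata)
    if !encrypted {
        return "not encrypted"
    }
    switch kind {
    case crypto.S3KMS:
        return "SSE-KMS"
    case crypto.S3:
        return "SSE-S3"
    case crypto.SSEC:
        return "SSE-C"
    default:
        return "encrypted, scheme unknown" // partial/unknown markers (nil Type)
    }
}

func main() {
    fmt.Println(describeSSE(map[string]string{})) // not encrypted
}
```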
// CreateMultipartMetadata adds the multipart flag entry to metadata

View File

@@ -59,7 +59,7 @@ var isEncryptedTests = []struct {
func TestIsEncrypted(t *testing.T) {
for i, test := range isEncryptedTests {
if isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
if _, isEncrypted := IsEncrypted(test.Metadata); isEncrypted != test.Encrypted {
t.Errorf("Test %d: got '%v' - want '%v'", i, isEncrypted, test.Encrypted)
}
}
@@ -74,8 +74,8 @@ var s3IsEncryptedTests = []struct {
{Encrypted: false, Metadata: map[string]string{MetaAlgorithm: ""}}, // 2
{Encrypted: false, Metadata: map[string]string{MetaSealedKeySSEC: ""}}, // 3
{Encrypted: true, Metadata: map[string]string{MetaSealedKeyS3: ""}}, // 4
{Encrypted: true, Metadata: map[string]string{MetaKeyID: ""}}, // 5
{Encrypted: true, Metadata: map[string]string{MetaDataEncryptionKey: ""}}, // 6
{Encrypted: false, Metadata: map[string]string{MetaKeyID: ""}}, // 5
{Encrypted: false, Metadata: map[string]string{MetaDataEncryptionKey: ""}}, // 6
{Encrypted: false, Metadata: map[string]string{"": ""}}, // 7
{Encrypted: false, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption": ""}}, // 8
}

View File

@@ -82,12 +82,6 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[MetaSealedKeyKMS]; ok {
return true
}
if _, ok := metadata[MetaKeyID]; ok {
return true
}
if _, ok := metadata[MetaDataEncryptionKey]; ok {
return true
}
return false
}
@@ -95,11 +89,14 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool {
// from the metadata using KMS and returns the decrypted object
// key.
func (s3 ssekms) UnsealObjectKey(kms KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) {
keyID, kmsKey, sealedKey, err := s3.ParseMetadata(metadata)
keyID, kmsKey, sealedKey, ctx, err := s3.ParseMetadata(metadata)
if err != nil {
return key, err
}
unsealKey, err := kms.UnsealKey(keyID, kmsKey, Context{bucket: path.Join(bucket, object)})
if _, ok := ctx[bucket]; !ok {
ctx[bucket] = path.Join(bucket, object)
}
unsealKey, err := kms.UnsealKey(keyID, kmsKey, ctx)
if err != nil {
return key, err
}
@@ -147,19 +144,19 @@ func (ssekms) CreateMetadata(metadata map[string]string, keyID string, kmsKey []
// KMS data key it returns both. If the metadata does not contain neither a
// KMS master key ID nor a sealed KMS data key it returns an empty keyID and
// KMS data key. Otherwise, it returns an error.
func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, err error) {
func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []byte, sealedKey SealedKey, ctx Context, err error) {
// Extract all required values from object metadata
b64IV, ok := metadata[MetaIV]
if !ok {
return keyID, kmsKey, sealedKey, errMissingInternalIV
return keyID, kmsKey, sealedKey, ctx, errMissingInternalIV
}
algorithm, ok := metadata[MetaAlgorithm]
if !ok {
return keyID, kmsKey, sealedKey, errMissingInternalSealAlgorithm
return keyID, kmsKey, sealedKey, ctx, errMissingInternalSealAlgorithm
}
b64SealedKey, ok := metadata[MetaSealedKeyKMS]
if !ok {
return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal sealed key for SSE-S3")
return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal sealed key for SSE-S3")
}
// There are two possibilities:
@@ -169,33 +166,44 @@ func (ssekms) ParseMetadata(metadata map[string]string) (keyID string, kmsKey []
keyID, idPresent := metadata[MetaKeyID]
b64KMSSealedKey, kmsKeyPresent := metadata[MetaDataEncryptionKey]
if !idPresent && kmsKeyPresent {
return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal KMS key-ID for SSE-S3")
return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal KMS key-ID for SSE-S3")
}
if idPresent && !kmsKeyPresent {
return keyID, kmsKey, sealedKey, Errorf("The object metadata is missing the internal sealed KMS data key for SSE-S3")
return keyID, kmsKey, sealedKey, ctx, Errorf("The object metadata is missing the internal sealed KMS data key for SSE-S3")
}
// Check whether all extracted values are well-formed
iv, err := base64.StdEncoding.DecodeString(b64IV)
if err != nil || len(iv) != 32 {
return keyID, kmsKey, sealedKey, errInvalidInternalIV
return keyID, kmsKey, sealedKey, ctx, errInvalidInternalIV
}
if algorithm != SealAlgorithm {
return keyID, kmsKey, sealedKey, errInvalidInternalSealAlgorithm
return keyID, kmsKey, sealedKey, ctx, errInvalidInternalSealAlgorithm
}
encryptedKey, err := base64.StdEncoding.DecodeString(b64SealedKey)
if err != nil || len(encryptedKey) != 64 {
return keyID, kmsKey, sealedKey, Errorf("The internal sealed key for SSE-S3 is invalid")
return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed key for SSE-KMS is invalid")
}
if idPresent && kmsKeyPresent { // We are using a KMS -> parse the sealed KMS data key.
kmsKey, err = base64.StdEncoding.DecodeString(b64KMSSealedKey)
if err != nil {
return keyID, kmsKey, sealedKey, Errorf("The internal sealed KMS data key for SSE-S3 is invalid")
return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed KMS data key for SSE-KMS is invalid")
}
}
b64Ctx, ok := metadata[MetaContext]
if ok {
b, err := base64.StdEncoding.DecodeString(b64Ctx)
if err != nil {
return keyID, kmsKey, sealedKey, ctx, Errorf("The internal KMS context is not base64-encoded")
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(b, ctx); err != nil {
return keyID, kmsKey, sealedKey, ctx, Errorf("The internal sealed KMS context is invalid")
}
}
sealedKey.Algorithm = algorithm
copy(sealedKey.IV[:], iv)
copy(sealedKey.Key[:], encryptedKey)
return keyID, kmsKey, sealedKey, nil
return keyID, kmsKey, sealedKey, ctx, nil
}
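The parser now also recovers the optional KMS context: the MetaContext value is base64-encoded JSON, so decoding is base64 followed by a JSON unmarshal into a string map, and UnsealObjectKey later adds the bucket/object entry if the client did not include it. A small round-trip sketch using the standard library (MinIO uses jsoniter here, but the wire format is plain JSON):

```
package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

func main() {
    // Encode: JSON of the context, then base64, as stored under the
    // X-Minio-Internal-Server-Side-Encryption-Context metadata key.
    ctx := map[string]string{"owner": "alice"}
    raw, _ := json.Marshal(ctx)
    stored := base64.StdEncoding.EncodeToString(raw)

    // Decode: reverse the steps when unsealing the object key.
    b, err := base64.StdEncoding.DecodeString(stored)
    if err != nil {
        panic(err)
    }
    decoded := map[string]string{}
    if err := json.Unmarshal(b, &decoded); err != nil {
        panic(err)
    }

    // Ensure the bucket/object binding is present before unsealing,
    // mirroring the check done in UnsealObjectKey above.
    if _, ok := decoded["photos"]; !ok {
        decoded["photos"] = "photos/cat.png" // bucket -> bucket/object
    }
    fmt.Println(decoded)
}
```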

View File

@@ -62,12 +62,6 @@ func (sses3) IsEncrypted(metadata map[string]string) bool {
if _, ok := metadata[MetaSealedKeyS3]; ok {
return true
}
if _, ok := metadata[MetaKeyID]; ok {
return true
}
if _, ok := metadata[MetaDataEncryptionKey]; ok {
return true
}
return false
}

View File

@@ -223,7 +223,10 @@ func (v *vaultService) CreateKey(keyID string) error {
// named key referenced by keyID. It also binds the generated key
// cryptographically to the provided context.
func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sealedKey []byte, err error) {
context := ctx.AppendTo(make([]byte, 0, 128))
context, err := ctx.MarshalText()
if err != nil {
return key, sealedKey, err
}
payload := map[string]interface{}{
"context": base64.StdEncoding.EncodeToString(context),
@@ -258,7 +261,10 @@ func (v *vaultService) GenerateKey(keyID string, ctx Context) (key [32]byte, sea
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (key [32]byte, err error) {
context := ctx.AppendTo(make([]byte, 0, 128))
context, err := ctx.MarshalText()
if err != nil {
return key, err
}
payload := map[string]interface{}{
"ciphertext": string(sealedKey),
@@ -282,29 +288,3 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
copy(key[:], plainKey)
return key, nil
}
// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
// has been changed by the KMS operator - i.e. the master key has been rotated.
// If the master key hasn't changed since the sealedKey has been created / updated
// it may return the same sealedKey as rotatedKey.
//
// The context must be same context as the one provided while
// generating the plaintext key / sealedKey.
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
context := ctx.AppendTo(make([]byte, 0, 128))
payload := map[string]interface{}{
"ciphertext": string(sealedKey),
"context": base64.StdEncoding.EncodeToString(context),
}
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
if err != nil {
return nil, Errorf("crypto: client error %w", err)
}
ciphertext, ok := s.Data["ciphertext"]
if !ok {
return nil, errMissingUpdatedKey
}
rotatedKey = []byte(ciphertext.(string))
return rotatedKey, nil
}
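The GenerateKey/UnsealKey hunks above switch from `ctx.AppendTo` to `ctx.MarshalText()` before base64-encoding the context into the Vault transit payload. A rough, self-contained sketch of what a deterministic text marshaling of such a context map can look like — the `kmsContext` type here is a stand-in, not the real `crypto.Context`:

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// kmsContext is a stand-in for the context map used above.
type kmsContext map[string]string

// MarshalText returns a deterministic JSON encoding of the context.
// encoding/json sorts map keys, so equal contexts yield equal bytes.
// Converting to the plain map type avoids recursing into this method.
func (c kmsContext) MarshalText() ([]byte, error) {
	return json.Marshal(map[string]string(c))
}

func main() {
	ctx := kmsContext{"bucket": "photos/2021.png"}
	text, err := ctx.MarshalText()
	if err != nil {
		panic(err)
	}
	// The base64 string is what would be placed under "context" in a
	// Vault transit generate/decrypt/rewrap payload.
	payload := map[string]interface{}{
		"context": base64.StdEncoding.EncodeToString(text),
	}
	fmt.Println(payload)
}
```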


@@ -23,6 +23,7 @@ import (
"errors"
"math"
"math/rand"
"net/http"
"os"
"path"
"strings"
@@ -42,9 +43,9 @@ import (
)
const (
dataCrawlSleepPerFolder = time.Millisecond // Time to wait between folders.
dataCrawlStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.
dataUsageUpdateDirCycles = 16 // Visit all folders every n cycles.
dataScannerSleepPerFolder = time.Millisecond // Time to wait between folders.
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.
dataUsageUpdateDirCycles = 16 // Visit all folders every n cycles.
healDeleteDangling = true
healFolderIncludeProb = 32 // Include a clean folder one in n cycles.
@@ -55,27 +56,28 @@ var (
globalHealConfig heal.Config
globalHealConfigMu sync.Mutex
dataCrawlerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
// Sleeper values are updated when config is loaded.
crawlerSleeper = newDynamicSleeper(10, 10*time.Second)
scannerSleeper = newDynamicSleeper(10, 10*time.Second)
)
// initDataCrawler will start the crawler in the background.
func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
go runDataCrawler(ctx, objAPI)
// initDataScanner will start the scanner in the background.
func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
go runDataScanner(ctx, objAPI)
}
// runDataCrawler will start a data crawler.
// runDataScanner will start a data scanner.
// The function will block until the context is canceled.
// There should only ever be one crawler running per cluster.
func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
// Make sure only 1 crawler is running on the cluster.
locker := objAPI.NewNSLock(minioMetaBucket, "runDataCrawler.lock")
// There should only ever be one scanner running per cluster.
func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
var err error
// Make sure only 1 scanner is running on the cluster.
locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for {
err := locker.GetLock(ctx, dataCrawlerLeaderLockTimeout)
ctx, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
if err != nil {
time.Sleep(time.Duration(r.Float64() * float64(dataCrawlStartDelay)))
time.Sleep(time.Duration(r.Float64() * float64(dataScannerStartDelay)))
continue
}
break
@@ -84,31 +86,34 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
// Load current bloom cycle
nextBloomCycle := intDataUpdateTracker.current() + 1
var buf bytes.Buffer
err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageBloomName, 0, -1, &buf, "", ObjectOptions{})
br, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageBloomName, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
} else {
if buf.Len() == 8 {
nextBloomCycle = binary.LittleEndian.Uint64(buf.Bytes())
if br.ObjInfo.Size == 8 {
if err = binary.Read(br, binary.LittleEndian, &nextBloomCycle); err != nil {
logger.LogIf(ctx, err)
}
}
br.Close()
}
crawlTimer := time.NewTimer(dataCrawlStartDelay)
defer crawlTimer.Stop()
scannerTimer := time.NewTimer(dataScannerStartDelay)
defer scannerTimer.Stop()
for {
select {
case <-ctx.Done():
return
case <-crawlTimer.C:
case <-scannerTimer.C:
// Reset the timer for next cycle.
crawlTimer.Reset(dataCrawlStartDelay)
scannerTimer.Reset(dataScannerStartDelay)
if intDataUpdateTracker.debug {
console.Debugln("starting crawler cycle")
console.Debugln("starting scanner cycle")
}
// Wait before starting next cycle and wait on startup.
@@ -116,7 +121,7 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
go storeDataUsageInBackend(ctx, objAPI, results)
bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
logger.LogIf(ctx, err)
err = objAPI.CrawlAndGetDataUsage(ctx, bf, results)
err = objAPI.NSScanner(ctx, bf, results)
close(results)
logger.LogIf(ctx, err)
if err == nil {
@@ -124,13 +129,13 @@ func runDataCrawler(ctx context.Context, objAPI ObjectLayer) {
nextBloomCycle++
var tmp [8]byte
binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)))
if err != nil {
logger.LogIf(ctx, err)
continue
}
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r, nil, nil), ObjectOptions{})
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r), ObjectOptions{})
if !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
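The bloom-cycle counter remains an 8-byte little-endian value; the read side now uses `binary.Read` on the streamed object instead of buffering it, and the write side still packs it with `binary.LittleEndian.PutUint64` before wrapping it in a hash reader. A tiny round trip of that encoding, with an in-memory buffer standing in for the object store:

```
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Write side: pack the next cycle number into 8 little-endian bytes,
	// as done before handing the buffer to PutObject.
	nextBloomCycle := uint64(42)
	var tmp [8]byte
	binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)

	// Read side: the new code streams the stored object and decodes it with
	// binary.Read instead of buffering and calling binary.LittleEndian.Uint64.
	var restored uint64
	if err := binary.Read(bytes.NewReader(tmp[:]), binary.LittleEndian, &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored) // 42
}
```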
@@ -152,27 +157,27 @@ type folderScanner struct {
newCache dataUsageCache
withFilter *bloomFilter
dataUsageCrawlDebug bool
healFolderInclude uint32 // Include a clean folder one in n cycles.
healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
dataUsageScannerDebug bool
healFolderInclude uint32 // Include a clean folder one in n cycles.
healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude
newFolders []cachedFolder
existingFolders []cachedFolder
disks []StorageAPI
}
// crawlDataFolder will crawl the basepath+cache.Info.Name and return an updated cache.
// scanDataFolder will scan the basepath+cache.Info.Name and return an updated cache.
// The returned cache will always be valid, but may not be updated from the existing.
// Before each operation sleepDuration is called which can be used to temporarily halt the crawler.
// Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
// If the supplied context is canceled the function will return at the first chance.
func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) {
t := UTCNow()
logPrefix := color.Green("data-usage: ")
logSuffix := color.Blue("- %v + %v", basePath, cache.Info.Name)
if intDataUpdateTracker.debug {
defer func() {
console.Debugf(logPrefix+" Crawl time: %v %s\n", time.Since(t), logSuffix)
console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix)
}()
}
@@ -185,15 +190,15 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
skipHeal := cache.Info.SkipHealing
s := folderScanner{
root: basePath,
getSize: getSize,
oldCache: cache,
newCache: dataUsageCache{Info: cache.Info},
newFolders: nil,
existingFolders: nil,
dataUsageCrawlDebug: intDataUpdateTracker.debug,
healFolderInclude: 0,
healObjectSelect: 0,
root: basePath,
getSize: getSize,
oldCache: cache,
newCache: dataUsageCache{Info: cache.Info},
newFolders: nil,
existingFolders: nil,
dataUsageScannerDebug: intDataUpdateTracker.debug,
healFolderInclude: 0,
healObjectSelect: 0,
}
// Add disks for set healing.
@@ -223,21 +228,21 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
s.withFilter = nil
}
}
if s.dataUsageCrawlDebug {
console.Debugf(logPrefix+"Start crawling. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix)
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Start scanning. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix)
}
done := ctx.Done()
var flattenLevels = 2
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix)
}
// Always scan flattenLevels deep. Cache root is level 0.
todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}}
for i := 0; i < flattenLevels; i++ {
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Level %v, scanning %v directories. %s\n", i, len(todo), logSuffix)
}
select {
@@ -253,7 +258,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
}
}
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"New folders: %v %s\n", s.newFolders, logSuffix)
}
@@ -282,7 +287,7 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
}
}
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Existing folders: %v %s\n", len(s.existingFolders), logSuffix)
}
@@ -309,13 +314,13 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
// If folder isn't in filter, skip it completely.
if !s.withFilter.containsDir(folder.name) {
if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) {
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Skipping non-updated folder: %v %s\n", folder, logSuffix)
}
s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()])
continue
} else {
if s.dataUsageCrawlDebug {
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Adding non-updated folder to heal check: %v %s\n", folder.name, logSuffix)
}
// Update probability of including objects
@@ -337,8 +342,8 @@ func crawlDataFolder(ctx context.Context, basePath string, cache dataUsageCache,
}
s.newCache.replaceHashed(h, folder.parent, *du)
}
if s.dataUsageCrawlDebug {
console.Debugf(logPrefix+"Finished crawl, %v entries %s\n", len(s.newCache.Cache), logSuffix)
if s.dataUsageScannerDebug {
console.Debugf(logPrefix+"Finished scanner, %v entries %s\n", len(s.newCache.Cache), logSuffix)
}
s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle++
@@ -367,7 +372,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
_, prefix := path2BucketObjectWithBasePath(f.root, folder.name)
var activeLifeCycle *lifecycle.Lifecycle
if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" Prefix %q has active rules\n", prefix)
}
activeLifeCycle = f.oldCache.Info.lifeCycle
@@ -378,20 +383,20 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
if folder.name != dataUsageRoot && !filter.containsDir(folder.name) {
if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) {
f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent)
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" Skipping non-updated folder: %v\n", folder.name)
}
continue
} else {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name)
}
// If probability was already crawlerHealFolderInclude, keep it.
// If probability was already scannerHealFolderInclude, keep it.
folder.objectHealProbDiv = f.healFolderInclude
}
}
}
crawlerSleeper.Sleep(ctx, dataCrawlSleepPerFolder)
scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
cache := dataUsageEntry{}
@@ -400,22 +405,22 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
entName = path.Clean(path.Join(folder.name, entName))
bucket, prefix := path2BucketObjectWithBasePath(f.root, entName)
if bucket == "" {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName)
}
return nil
return errDoneForNow
}
if isReservedOrInvalidBucket(bucket, false) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" invalid bucket: %v, entry: %v\n", bucket, entName)
}
return nil
return errDoneForNow
}
select {
case <-done:
return ctx.Err()
return errDoneForNow
default:
}
@@ -443,16 +448,16 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
}
// Dynamic time delay.
wait := crawlerSleeper.Timer(ctx)
wait := scannerSleeper.Timer(ctx)
// Get file size, ignore errors.
item := crawlItem{
item := scannerItem{
Path: path.Join(f.root, entName),
Typ: typ,
bucket: bucket,
prefix: path.Dir(prefix),
objectName: path.Base(entName),
debug: f.dataUsageCrawlDebug,
debug: f.dataUsageScannerDebug,
lifeCycle: activeLifeCycle,
heal: thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
}
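The recurring `wait := scannerSleeper.Timer(ctx)` / `wait()` pattern in these hunks throttles the scanner by sleeping in proportion to how long the preceding work took. A toy sleeper with that shape, assuming a fixed factor and cap (the real `dynamicSleeper` is reconfigured from the loaded config at runtime):

```
package main

import (
	"context"
	"fmt"
	"time"
)

// dynamicSleeper sleeps a multiple of the measured work duration, capped at
// maxSleep, so heavy operations automatically slow the scan loop down.
type dynamicSleeper struct {
	factor   float64
	maxSleep time.Duration
}

// Timer returns a function that, when called, sleeps factor*elapsed
// (bounded by maxSleep) unless the context is canceled first.
func (d dynamicSleeper) Timer(ctx context.Context) func() {
	start := time.Now()
	return func() {
		wait := time.Duration(float64(time.Since(start)) * d.factor)
		if wait > d.maxSleep {
			wait = d.maxSleep
		}
		select {
		case <-ctx.Done():
		case <-time.After(wait):
		}
	}
}

func main() {
	sleeper := dynamicSleeper{factor: 2, maxSleep: 100 * time.Millisecond}
	wait := sleeper.Timer(context.Background())
	time.Sleep(10 * time.Millisecond) // pretend to scan one entry
	wait()                            // throttles for roughly twice as long
	fmt.Println("done")
}
```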
@@ -528,17 +533,18 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
healObjectsPrefix := color.Green("healObjects:")
for k := range existing {
bucket, prefix := path2BucketObject(k)
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix)
}
// Dynamic time delay.
wait := crawlerSleeper.Timer(ctx)
wait := scannerSleeper.Timer(ctx)
resolver.bucket = bucket
foundObjs := false
dangling := false
ctx, cancel := context.WithCancel(ctx)
err := listPathRaw(ctx, listPathRawOptions{
disks: f.disks,
bucket: bucket,
@@ -548,13 +554,13 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
minDisks: len(f.disks), // We want full consistency.
// Weird, maybe transient error.
agreed: func(entry metaCacheEntry) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name)
}
},
// Some disks have data for this.
partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" got partial, %d agreed, errs: %v\n", nAgreed, errs)
}
@@ -563,7 +569,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
// Sleep and reset.
wait()
wait = crawlerSleeper.Timer(ctx)
wait = scannerSleeper.Timer(ctx)
entry, ok := entries.resolve(&resolver)
if !ok {
for _, err := range errs {
@@ -576,7 +582,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
entry, _ = entries.firstFound()
}
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" resolved to: %v, dir: %v\n", entry.name, entry.isDir())
}
@@ -600,7 +606,7 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
for _, ver := range fiv.Versions {
// Sleep and reset.
wait()
wait = crawlerSleeper.Timer(ctx)
wait = scannerSleeper.Timer(ctx)
err := bgSeq.queueHealTask(healSource{
bucket: bucket,
object: fiv.Name,
@@ -614,31 +620,31 @@ func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFo
},
// Too many disks failed.
finished: func(errs []error) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" too many errors: %v\n", errs)
}
cancel()
},
})
if f.dataUsageCrawlDebug && err != nil && err != errFileNotFound {
if f.dataUsageScannerDebug && err != nil && err != errFileNotFound {
console.Debugf(healObjectsPrefix+" checking returned value %v (%T)\n", err, err)
}
// If we found one or more disks with this folder, delete it.
if err == nil && dangling {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(healObjectsPrefix+" deleting dangling directory %s\n", prefix)
}
objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{
Recursive: true,
Remove: true,
Remove: healDeleteDangling,
},
func(bucket, object, versionID string) error {
// Wait for each heal as per crawler frequency.
// Wait for each heal as per scanner frequency.
wait()
wait = crawlerSleeper.Timer(ctx)
wait = scannerSleeper.Timer(ctx)
return bgSeq.queueHealTask(healSource{
bucket: bucket,
object: object,
@@ -678,7 +684,7 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
addDir = func(entName string, typ os.FileMode) error {
select {
case <-done:
return ctx.Err()
return errDoneForNow
default:
}
@@ -686,12 +692,12 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
dirStack = append(dirStack, entName)
err := readDirFn(path.Join(dirStack...), addDir)
dirStack = dirStack[:len(dirStack)-1]
crawlerSleeper.Sleep(ctx, dataCrawlSleepPerFolder)
scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
return err
}
// Dynamic time delay.
wait := crawlerSleeper.Timer(ctx)
wait := scannerSleeper.Timer(ctx)
// Get file size, ignore errors.
dirStack = append(dirStack, entName)
@@ -701,19 +707,19 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
bucket, prefix := path2BucketObjectWithBasePath(f.root, fileName)
var activeLifeCycle *lifecycle.Lifecycle
if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
if f.dataUsageCrawlDebug {
if f.dataUsageScannerDebug {
console.Debugf(deepScannerLogPrefix+" Prefix %q has active rules\n", prefix)
}
activeLifeCycle = f.oldCache.Info.lifeCycle
}
item := crawlItem{
item := scannerItem{
Path: fileName,
Typ: typ,
bucket: bucket,
prefix: path.Dir(prefix),
objectName: path.Base(entName),
debug: f.dataUsageCrawlDebug,
debug: f.dataUsageScannerDebug,
lifeCycle: activeLifeCycle,
heal: hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
}
@@ -748,8 +754,8 @@ func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder,
return &cache, nil
}
// crawlItem represents each file while walking.
type crawlItem struct {
// scannerItem represents each file while walking.
type scannerItem struct {
Path string
Typ os.FileMode
@@ -769,10 +775,10 @@ type sizeSummary struct {
replicaSize int64
}
type getSizeFn func(item crawlItem) (sizeSummary, error)
type getSizeFn func(item scannerItem) (sizeSummary, error)
// transformMetaDir will transform a directory to prefix/file.ext
func (i *crawlItem) transformMetaDir() {
func (i *scannerItem) transformMetaDir() {
split := strings.Split(i.prefix, SlashSeparator)
if len(split) > 1 {
i.prefix = path.Join(split[:len(split)-1]...)
@@ -791,42 +797,39 @@ type actionMeta struct {
var applyActionsLogPrefix = color.Green("applyActions:")
// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
if i.debug {
if meta.oi.VersionID != "" {
console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
} else {
console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
}
}
healOpts := madmin.HealOpts{Remove: healDeleteDangling}
if meta.bitRotScan {
healOpts.ScanMode = madmin.HealDeepScan
}
res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return 0
}
if err != nil && !errors.Is(err, NotImplemented{}) {
logger.LogIf(ctx, err)
return 0
}
return res.ObjectSize
}
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
size, err := meta.oi.GetActualSize()
if i.debug {
logger.LogIf(ctx, err)
}
if i.heal {
if i.debug {
if meta.oi.VersionID != "" {
console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
} else {
console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
}
}
healOpts := madmin.HealOpts{Remove: healDeleteDangling}
if meta.bitRotScan {
healOpts.ScanMode = madmin.HealDeepScan
}
res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return 0
}
if err != nil && !errors.Is(err, NotImplemented{}) {
logger.LogIf(ctx, err)
return 0
}
size = res.ObjectSize
}
if i.lifeCycle == nil {
if i.debug {
console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
}
return size
return false, size
}
versionID := meta.oi.VersionID
@@ -860,7 +863,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
if i.debug {
console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
}
return size
return false, size
}
obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
@@ -872,26 +875,46 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
if !obj.DeleteMarker { // if this is not a delete marker log and return
// Do nothing - heal in the future.
logger.LogIf(ctx, err)
return size
return false, size
}
case ObjectNotFound, VersionNotFound:
// object not found or version not found return 0
return 0
return false, 0
default:
// All other errors proceed.
logger.LogIf(ctx, err)
return size
return false, size
}
}
var applied bool
action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
if action != lifecycle.NoneAction {
applied = applyLifecycleAction(ctx, action, o, obj)
}
if applied {
return 0
switch action {
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
return true, size
}
// For all other lifecycle actions that remove data
return true, 0
}
return false, size
}
// applyActions will apply lifecycle checks on to a scanned item.
// The resulting size on disk will always be returned.
// The metadata will be compared to consensus on the object layer before any changes are applied.
// If no metadata is supplied, -1 is returned if no action is taken.
func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta) int64 {
applied, size := i.applyLifecycle(ctx, o, meta)
// An applied lifecycle action means we removed or transitioned the object
// from the current deployment, so we do not have to invoke the healing
// routine even if the heal flag asks for it.
if !applied && i.heal {
size = i.applyHealing(ctx, o, meta)
}
return size
}
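This is the heart of the "apply lifecycle then decide healing action" change: `applyActions` now runs the lifecycle evaluation first and only falls through to healing when no lifecycle action was applied and the item was selected for a heal check. A stripped-down sketch of that control flow with placeholder helpers:

```
package main

import "fmt"

// lifecycleOutcome is a toy result of a lifecycle evaluation.
type lifecycleOutcome struct {
	applied bool  // a lifecycle action was taken
	size    int64 // size to account for after the action
}

// applyLifecycle and applyHealing are placeholders for the real helpers.
func applyLifecycle(obj string, size int64) lifecycleOutcome {
	if obj == "expired/object" {
		return lifecycleOutcome{applied: true, size: 0} // object removed
	}
	return lifecycleOutcome{applied: false, size: size}
}

func applyHealing(obj string, size int64) int64 {
	fmt.Println("healing", obj)
	return size
}

// applyActions mirrors the new ordering: lifecycle first, heal only if the
// lifecycle pass took no action and healing was requested for this item.
func applyActions(obj string, size int64, healRequested bool) int64 {
	out := applyLifecycle(obj, size)
	if !out.applied && healRequested {
		return applyHealing(obj, out.size)
	}
	return out.size
}

func main() {
	fmt.Println(applyActions("expired/object", 1024, true)) // 0, no heal queued
	fmt.Println(applyActions("live/object", 1024, true))    // heals, then 1024
}
```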
@@ -990,10 +1013,12 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
return true
}
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo) bool {
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, applyOnVersion bool) bool {
opts := ObjectOptions{}
opts.VersionID = obj.VersionID
if applyOnVersion {
opts.VersionID = obj.VersionID
}
if opts.VersionID == "" {
opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket)
}
@@ -1025,20 +1050,20 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
}
// Apply object, object version, restored object or restored object version action on the given object
func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool {
func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject, applyOnVersion bool) bool {
if obj.TransitionStatus != "" {
return applyExpiryOnTransitionedObject(ctx, objLayer, obj, restoredObject)
}
return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj)
return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion)
}
// Perform actions (removal of transitioning of objects), return true the action is successfully performed
// Perform actions (removal or transitioning of objects), return true if the action is successfully performed
func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) {
switch action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(ctx, objLayer, obj, false)
success = applyExpiryRule(ctx, objLayer, obj, false, action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
success = applyExpiryRule(ctx, objLayer, obj, true)
success = applyExpiryRule(ctx, objLayer, obj, true, action == lifecycle.DeleteRestoredVersionAction)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
success = applyTransitionAction(ctx, action, objLayer, obj)
}
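`applyExpiryRule` gains an `applyOnVersion` flag so that only the version-targeted delete actions pin the request to `obj.VersionID`; a plain `DeleteAction` goes through the regular delete path and may produce a delete marker on a versioned bucket. A hedged sketch of how such a flag could map onto delete options (the types below are illustrative, not MinIO's):

```
package main

import "fmt"

// deleteOptions is a stand-in for the object options passed to a delete call.
type deleteOptions struct {
	VersionID string
	Versioned bool
}

// expiryOptions mirrors the new applyOnVersion handling: the version ID is
// only carried over for version-targeted actions; otherwise the call relies
// on the bucket's versioning state.
func expiryOptions(versionID string, applyOnVersion, bucketVersioned bool) deleteOptions {
	opts := deleteOptions{}
	if applyOnVersion {
		opts.VersionID = versionID
	}
	if opts.VersionID == "" {
		opts.Versioned = bucketVersioned
	}
	return opts
}

func main() {
	// DeleteVersionAction: remove exactly this version.
	fmt.Printf("%+v\n", expiryOptions("v1", true, true))
	// DeleteAction: let the versioned delete path create a delete marker.
	fmt.Printf("%+v\n", expiryOptions("v1", false, true))
}
```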
@@ -1046,12 +1071,12 @@ func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer
}
// objectPath returns the prefix and object name.
func (i *crawlItem) objectPath() string {
func (i *scannerItem) objectPath() string {
return path.Join(i.prefix, i.objectName)
}
// healReplication will heal a scanned item that has failed replication.
func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) {
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
// heal delete marker replication failure or versioned delete replication failure
if oi.ReplicationStatus == replication.Pending ||
@@ -1064,10 +1089,10 @@ func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi Objec
switch oi.ReplicationStatus {
case replication.Pending:
sizeS.pendingSize += oi.Size
globalReplicationState.queueReplicaTask(oi)
globalReplicationPool.queueReplicaTask(oi)
case replication.Failed:
sizeS.failedSize += oi.Size
globalReplicationState.queueReplicaTask(oi)
globalReplicationPool.queueReplicaTask(oi)
case replication.Completed, "COMPLETE":
sizeS.replicatedSize += oi.Size
case replication.Replica:
@@ -1076,7 +1101,7 @@ func (i *crawlItem) healReplication(ctx context.Context, o ObjectLayer, oi Objec
}
// healReplicationDeletes will heal a scanned deleted item that failed to replicate deletes.
func (i *crawlItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) {
// handle soft delete and permanent delete failures here.
if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() {
versionID := ""
@@ -1086,7 +1111,7 @@ func (i *crawlItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, o
} else {
versionID = oi.VersionID
}
globalReplicationState.queueReplicaDeleteTask(DeletedObjectVersionInfo{
globalReplicationPool.queueReplicaDeleteTask(DeletedObjectVersionInfo{
DeletedObject: DeletedObject{
ObjectName: oi.Name,
DeleteMarkerVersionID: dmVersionID,


@@ -80,7 +80,7 @@ func newDataUpdateTracker() *dataUpdateTracker {
Current: dataUpdateFilter{
idx: 1,
},
debug: env.Get(envDataUsageCrawlDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
debug: env.Get(envDataUsageScannerDebug, config.EnableOff) == config.EnableOn || serverDebugLog,
input: make(chan string, dataUpdateTrackerQueueSize),
save: make(chan struct{}, 1),
saveExited: make(chan struct{}),


@@ -91,7 +91,7 @@ type dataUsageCacheInfo struct {
Name string
LastUpdate time.Time
NextCycle uint32
// indicates if the disk is being healed and crawler
// indicates if the disk is being healed and scanner
// should skip healing the disk
SkipHealing bool
BloomFilter []byte `msg:"BloomFilter,omitempty"`
@@ -485,7 +485,7 @@ type objectIO interface {
// Only backend errors are returned as errors.
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, noLock, ObjectOptions{})
if err != nil {
switch err.(type) {
case ObjectNotFound:
@@ -513,7 +513,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
}()
defer pr.Close()
r, err := hash.NewReader(pr, -1, "", "", -1, false)
r, err := hash.NewReader(pr, -1, "", "", -1)
if err != nil {
return err
}
@@ -521,7 +521,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(r, nil, nil),
NewPutObjReader(r),
ObjectOptions{})
if isErrBucketNotFound(err) {
return nil


@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"encoding/json"
"net/http"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger"
@@ -27,7 +28,7 @@ import (
)
const (
envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"
envDataUsageScannerDebug = "MINIO_DISK_USAGE_SCANNER_DEBUG"
dataUsageRoot = SlashSeparator
dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix
@@ -46,12 +47,12 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
continue
}
size := int64(len(dataUsageJSON))
r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size)
if err != nil {
logger.LogIf(ctx, err)
continue
}
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r), ObjectOptions{})
if !isErrBucketNotFound(err) {
logger.LogIf(ctx, err)
}
@@ -59,20 +60,18 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
}
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
var dataUsageInfoJSON bytes.Buffer
err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
r, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageObjName, nil, http.Header{}, readLock, ObjectOptions{})
if err != nil {
if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
return DataUsageInfo{}, nil
}
return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
}
defer r.Close()
var dataUsageInfo DataUsageInfo
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(dataUsageInfoJSON.Bytes(), &dataUsageInfo)
if err != nil {
if err = json.NewDecoder(r).Decode(&dataUsageInfo); err != nil {
return DataUsageInfo{}, err
}
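`loadDataUsageFromBackend` now decodes the usage report directly from the `GetObjectNInfo` stream rather than buffering the whole object and unmarshalling it. The same streaming-decode pattern in miniature, with a `strings.Reader` standing in for the object stream and the standard-library decoder instead of jsoniter:

```
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// dataUsage is a tiny stand-in for DataUsageInfo.
type dataUsage struct {
	ObjectsCount uint64 `json:"objectsCount"`
	BucketsCount uint64 `json:"bucketsCount"`
}

// decodeUsage streams the JSON document instead of reading it fully into a
// bytes.Buffer first, which is what the change above switches to.
func decodeUsage(r io.Reader) (dataUsage, error) {
	var du dataUsage
	if err := json.NewDecoder(r).Decode(&du); err != nil {
		return dataUsage{}, err
	}
	return du, nil
}

func main() {
	r := strings.NewReader(`{"objectsCount": 10, "bucketsCount": 2}`)
	du, err := decodeUsage(r)
	fmt.Println(du, err)
}
```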


@@ -51,7 +51,7 @@ func TestDataUsageUpdate(t *testing.T) {
}
createUsageTestFiles(t, base, bucket, files)
getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
if item.Typ&os.ModeDir == 0 {
var s os.FileInfo
s, err = os.Stat(item.Path)
@@ -64,7 +64,7 @@ func TestDataUsageUpdate(t *testing.T) {
return
}
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
if err != nil {
t.Fatal(err)
}
@@ -185,7 +185,7 @@ func TestDataUsageUpdate(t *testing.T) {
},
}
createUsageTestFiles(t, base, bucket, files)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
got, err = scanDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -270,7 +270,7 @@ func TestDataUsageUpdate(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = crawlDataFolder(context.Background(), base, got, getSize)
got, err = scanDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -347,7 +347,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
createUsageTestFiles(t, base, "", files)
getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
if item.Typ&os.ModeDir == 0 {
var s os.FileInfo
s, err = os.Stat(item.Path)
@@ -359,7 +359,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
return
}
got, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
got, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize)
if err != nil {
t.Fatal(err)
}
@@ -469,7 +469,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
},
}
createUsageTestFiles(t, base, "", files)
got, err = crawlDataFolder(context.Background(), base, got, getSize)
got, err = scanDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -552,7 +552,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = crawlDataFolder(context.Background(), base, got, getSize)
got, err = scanDataFolder(context.Background(), base, got, getSize)
if err != nil {
t.Fatal(err)
}
@@ -646,7 +646,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
createUsageTestFiles(t, base, bucket, files)
getSize := func(item crawlItem) (sizeS sizeSummary, err error) {
getSize := func(item scannerItem) (sizeS sizeSummary, err error) {
if item.Typ&os.ModeDir == 0 {
var s os.FileInfo
s, err = os.Stat(item.Path)
@@ -658,7 +658,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
return
}
want, err := crawlDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
want, err := scanDataFolder(context.Background(), base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize)
if err != nil {
t.Fatal(err)
}


@@ -23,7 +23,6 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -103,7 +102,6 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
}
// We set file info only if its valid.
o.ModTime = m.Stat.ModTime
o.Size = m.Stat.Size
o.ETag = extractETag(m.Meta)
o.ContentType = m.Meta["content-type"]
@@ -122,6 +120,12 @@ func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) {
o.Expires = t.UTC()
}
}
if mtime, ok := m.Meta["last-modified"]; ok {
if t, e = time.Parse(http.TimeFormat, mtime); e == nil {
o.ModTime = t.UTC()
}
}
// etag/md5Sum has already been extracted. We need to
// remove to avoid it from appearing as part of user-defined metadata
o.UserDefined = cleanMetadata(m.Meta)
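With the stat-based ModTime gone, the cached ObjectInfo now derives its modification time from a `last-modified` entry written into the cache metadata (see the `getMetadata` hunk further below), parsed with `http.TimeFormat`. A small sketch of that format/parse pair:

```
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Writer side: store the object's modification time in the cache
	// metadata using the canonical HTTP date format.
	modTime := time.Date(2021, time.April, 6, 12, 0, 0, 0, time.UTC)
	meta := map[string]string{
		"last-modified": modTime.Format(http.TimeFormat),
	}

	// Reader side: when building ObjectInfo from cached metadata, parse the
	// stored value back; fall back to the zero time if it is absent.
	var restored time.Time
	if v, ok := meta["last-modified"]; ok {
		if t, err := time.Parse(http.TimeFormat, v); err == nil {
			restored = t.UTC()
		}
	}
	fmt.Println(restored.Equal(modTime)) // true
}
```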
@@ -264,10 +268,6 @@ func (c *diskCache) toClear() uint64 {
return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
}
var (
errDoneForNow = errors.New("done for now")
)
func (c *diskCache) purgeWait(ctx context.Context) {
for {
select {
@@ -377,7 +377,7 @@ func (c *diskCache) purge(ctx context.Context) {
return nil
}
if err := readDirFilterFn(c.dir, filterFn); err != nil {
if err := readDirFn(c.dir, filterFn); err != nil {
logger.LogIf(ctx, err)
return
}
@@ -437,7 +437,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
cLock := c.NewNSLockFn(cacheObjPath)
if err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
if ctx, err = cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
return
}
@@ -506,9 +506,7 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
}
// get metadata of part.1 if full file has been cached.
partial = true
fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile))
if err == nil {
meta.Stat.ModTime = atime.Get(fi)
if _, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)); err == nil {
partial = false
}
return meta, partial, meta.Hits, nil
@@ -517,9 +515,11 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
// saves object metadata to disk cache
// incHitsOnly is true if metadata update is incrementing only the hit counter
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
var err error
cachedPath := getCacheSHADir(c.dir, bucket, object)
cLock := c.NewNSLockFn(cachedPath)
if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return err
}
defer cLock.Unlock()
@@ -570,7 +570,6 @@ func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, met
}
}
m.Stat.Size = actualSize
m.Stat.ModTime = UTCNow()
if !incHitsOnly {
// reset meta
m.Meta = meta
@@ -697,7 +696,8 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
}
cachePath := getCacheSHADir(c.dir, bucket, object)
cLock := c.NewNSLockFn(cachePath)
if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
ctx, err = cLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return oi, err
}
defer cLock.Unlock()
@@ -911,7 +911,8 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
cacheObjPath := getCacheSHADir(c.dir, bucket, object)
cLock := c.NewNSLockFn(cacheObjPath)
if err := cLock.GetRLock(ctx, globalOperationTimeout); err != nil {
ctx, err = cLock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
return nil, numHits, err
}
@@ -975,7 +976,8 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
// Deletes the cached object
func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
cLock := c.NewNSLockFn(cacheObjPath)
if err := cLock.GetLock(ctx, globalOperationTimeout); err != nil {
_, err = cLock.GetLock(ctx, globalOperationTimeout)
if err != nil {
return err
}
defer cLock.Unlock()
@@ -1023,7 +1025,7 @@ func (c *diskCache) scanCacheWritebackFailures(ctx context.Context) {
return nil
}
if err := readDirFilterFn(c.dir, filterFn); err != nil {
if err := readDirFn(c.dir, filterFn); err != nil {
logger.LogIf(ctx, err)
return
}
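Throughout this file the lock calls change shape from `err := cLock.GetLock(ctx, timeout)` to `ctx, err = cLock.GetLock(ctx, timeout)`, i.e. the lock now hands back a derived context through which cancellation can propagate to the work done under the lock. A toy lock illustrating that calling convention (the real NSLock machinery is distributed and far more involved):

```
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// nsLock is a toy namespace lock whose GetLock returns a context that is
// canceled when the lock is released, mimicking the new call shape.
type nsLock struct {
	mu     sync.Mutex
	cancel context.CancelFunc
}

func (l *nsLock) GetLock(ctx context.Context, timeout time.Duration) (context.Context, error) {
	// A real implementation would time out the acquisition; here the lock
	// is always free, so we only derive the cancellable context.
	l.mu.Lock()
	lctx, cancel := context.WithTimeout(ctx, timeout)
	l.cancel = cancel
	return lctx, nil
}

func (l *nsLock) Unlock() {
	l.cancel()
	l.mu.Unlock()
}

func main() {
	var lk nsLock
	ctx, err := lk.GetLock(context.Background(), time.Second)
	if err != nil {
		fmt.Println("lock failed:", err)
		return
	}
	defer lk.Unlock()
	_, hasDeadline := ctx.Deadline()
	fmt.Println("holding lock, derived context has deadline:", hasDeadline)
}
```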


@@ -173,7 +173,11 @@ func backendDownError(err error) bool {
// IsCacheable returns if the object should be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
return !crypto.IsEncrypted(o.UserDefined) || globalCacheKMS != nil
if globalCacheKMS != nil {
return true
}
_, ok := crypto.IsEncrypted(o.UserDefined)
return !ok
}
// reads file cached on disk from offset up to length


@@ -200,6 +200,7 @@ func getMetadata(objInfo ObjectInfo) map[string]string {
if !objInfo.Expires.Equal(timeSentinel) {
metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
}
metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
for k, v := range objInfo.UserDefined {
metadata[k] = v
}
@@ -704,14 +705,14 @@ func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
if st == CommitComplete || st.String() == "" {
return
}
hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size, globalCLIContext.StrictS3Compat)
hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
if err != nil {
return
}
var opts ObjectOptions
opts.UserDefined = make(map[string]string)
opts.UserDefined[xhttp.ContentMD5] = oi.UserDefined["content-md5"]
objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader, nil, nil), opts)
objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
wbCommitStatus := CommitComplete
if err != nil {
wbCommitStatus = CommitFailed


@@ -18,6 +18,7 @@ package cmd
import (
"testing"
"time"
)
// Tests ToObjectInfo function.
@@ -27,7 +28,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
if objInfo.Size != 0 {
t.Fatal("Unexpected object info value for Size", objInfo.Size)
}
if !objInfo.ModTime.Equal(timeSentinel) {
if !objInfo.ModTime.Equal(time.Time{}) {
t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
}
if objInfo.IsDir {


@@ -20,6 +20,7 @@ import (
"bufio"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"crypto/subtle"
"encoding/binary"
"encoding/hex"
@@ -33,7 +34,6 @@ import (
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
)
@@ -247,56 +247,45 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
}
func decryptObjectInfo(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
switch {
default:
return nil, errObjectTampered
case crypto.S3.IsEncrypted(metadata) && isCacheEncrypted(metadata):
if globalCacheKMS == nil {
switch kind, _ := crypto.IsEncrypted(metadata); kind {
case crypto.S3:
var KMS crypto.KMS = GlobalKMS
if isCacheEncrypted(metadata) {
KMS = globalCacheKMS
}
if KMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
if err != nil {
return nil, err
}
extKey, err := globalCacheKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
return nil, err
}
return objectKey[:], nil
case crypto.S3.IsEncrypted(metadata):
case crypto.S3KMS:
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
keyID, kmsKey, sealedKey, err := crypto.S3.ParseMetadata(metadata)
objectKey, err := crypto.S3KMS.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
if err != nil {
return nil, err
}
extKey, err := GlobalKMS.UnsealKey(keyID, kmsKey, crypto.Context{bucket: path.Join(bucket, object)})
if err != nil {
return nil, err
}
var objectKey crypto.ObjectKey
if err = objectKey.Unseal(extKey, sealedKey, crypto.S3.String(), bucket, object); err != nil {
return nil, err
}
return objectKey[:], nil
case crypto.SSEC.IsEncrypted(metadata):
var extKey [32]byte
copy(extKey[:], key)
case crypto.SSEC:
sealedKey, err := crypto.SSEC.ParseMetadata(metadata)
if err != nil {
return nil, err
}
var objectKey crypto.ObjectKey
var (
objectKey crypto.ObjectKey
extKey [32]byte
)
copy(extKey[:], key)
if err = objectKey.Unseal(extKey, sealedKey, crypto.SSEC.String(), bucket, object); err != nil {
return nil, err
}
return objectKey[:], nil
default:
return nil, errObjectTampered
}
}
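`decryptObjectInfo` now switches once on the kind reported by `crypto.IsEncrypted` and delegates key unsealing to per-scheme `UnsealObjectKey` helpers, instead of probing each scheme's `IsEncrypted` predicate in turn. A schematic version of that dispatch with toy types — the real `crypto` package kinds and helpers are only referenced by name here:

```
package main

import (
	"errors"
	"fmt"
)

// kind enumerates encryption schemes, mirroring the idea of
// crypto.S3 / crypto.S3KMS / crypto.SSEC without using the real types.
type kind int

const (
	notEncrypted kind = iota
	sseS3
	sseKMS
	sseC
)

// isEncrypted stands in for the new (kind, bool) shape of crypto.IsEncrypted.
func isEncrypted(metadata map[string]string) (kind, bool) {
	switch metadata["sse"] {
	case "S3":
		return sseS3, true
	case "KMS":
		return sseKMS, true
	case "C":
		return sseC, true
	}
	return notEncrypted, false
}

var errObjectTampered = errors.New("object metadata is tampered or unsupported")

// unsealPath shows the dispatch shape: one branch per scheme, with the
// default treated as tampered metadata, instead of cascading IsEncrypted checks.
func unsealPath(metadata map[string]string) (string, error) {
	switch k, _ := isEncrypted(metadata); k {
	case sseS3:
		return "unseal via KMS (cache KMS if the object is cache-encrypted)", nil
	case sseKMS:
		return "unseal via the global KMS with the stored KMS data key", nil
	case sseC:
		return "unseal with the client-provided key", nil
	default:
		return "", errObjectTampered
	}
}

func main() {
	path, err := unsealPath(map[string]string{"sse": "KMS"})
	fmt.Println(path, err)
}
```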
@@ -516,7 +505,7 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {
// It returns an error if the object is not encrypted or marked as encrypted
// but has an invalid size.
func (o *ObjectInfo) DecryptedSize() (int64, error) {
if !crypto.IsEncrypted(o.UserDefined) {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
}
if !isEncryptedMultipart(*o) {
@@ -649,7 +638,7 @@ func tryDecryptETag(key []byte, encryptedETag string, ssec bool) string {
// requested range starts, along with the DARE sequence number within
// that part. For single part objects, the partStart will be 0.
func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
if !crypto.IsEncrypted(o.UserDefined) {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
err = errors.New("Object is not encrypted")
return
}
@@ -796,7 +785,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
}
}
encrypted = crypto.IsEncrypted(info.UserDefined)
_, encrypted = crypto.IsEncrypted(info.UserDefined)
if !encrypted && crypto.SSEC.IsRequested(headers) && r.Header.Get(xhttp.AmzCopySource) == "" {
return false, errInvalidEncryptionParameters
}
@@ -818,7 +807,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
return encrypted, err
}
if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
if _, ok := crypto.IsEncrypted(info.UserDefined); ok && !crypto.IsMultiPart(info.UserDefined) {
info.ETag = getDecryptedETag(headers, *info, false)
}
}


@@ -129,7 +129,7 @@ func TestDecryptObjectInfo(t *testing.T) {
for i, test := range decryptObjectInfoTests {
if encrypted, err := DecryptObjectInfo(&test.info, test.request); err != test.expErr {
t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
} else if _, enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
} else if !encrypted && enc != encrypted {
t.Errorf("Test %d: Decryption thinks object is not encrypted but it is", i)
