Compare commits

...

22 Commits

Author SHA1 Message Date
jiuker
a6c538c5a1 fix: honor renamePart's PathNotFound (#21378) 2025-06-13 04:33:47 -07:00
jiuker
e1fcaebc77 fix: when ListMultipartUploads append result from cache should filter with bucket (#21376) 2025-06-12 00:09:12 -07:00
Johannes Horn
21409f112d add networkpolicy for job and add possibility to define egress ports (#20951) 2025-06-08 09:14:18 -07:00
Sung Jeon
417c8648f0 use provided region in tier configuration for S3 backend (#21365)
fixes #21364
2025-06-08 09:13:30 -07:00
ffgan
e2245a0b12 allow cross-compiling support for RISC-V 64 (#21348)
This is a minor PR that adds support for building on RISC-V 64;
it covers compilation only. There is no guarantee
that the code is tested or will work in production.
2025-06-08 09:12:05 -07:00
Shubhendu
b4b3d208dd Add targetArn label for bucket replication metrics (#21354)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2025-06-04 13:45:31 -07:00
ILIYA
0a36d41dcd modernizes for loop in cmd/, internal/ (#21309) 2025-05-27 08:19:03 -07:00
jiuker
ea77bcfc98 fix: panic for TestListObjectsWithILM (#21322) 2025-05-27 08:18:36 -07:00
jiuker
9f24ca5d66 fix: empty fileName cause Reader nil for PostPolicyBucketHandler (#21323) 2025-05-27 08:18:26 -07:00
VARUN SHARMA
816666a4c6 make some targeted updates to README.md (#21125) 2025-05-26 12:34:56 -07:00
Anis Eleuch
2c7fe094d1 s3: Fix early listing stopping when ILM is enabled (#472) (#21246)
An S3 listing call is usually sent with a 'max-keys' parameter. This
'max-keys' value is also passed to the WalkDir() call. However, when ILM is
enabled in a bucket and some objects are skipped, the listing can
return IsTruncated set to false even if there are more entries on
the drives.

The reason is that the drives stop feeding the listing code once they
reach the max-keys limit, and the listing code concludes that the listing
is finished because it is no longer being fed.

This change asks the drives not to stop listing early and relies on context
cancellation to stop listing in the drives as fast as possible.
2025-05-26 00:06:43 -07:00
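
To see why a wrongly cleared IsTruncated flag loses objects, consider how a client pages through a listing. Below is a minimal sketch with a made-up `Page` type and `listAll` helper; only the IsTruncated/NextMarker semantics mirror the S3 listing API.

```go
package main

import "fmt"

// Page holds the fields of an S3-style list response that matter here
// (hypothetical type, for illustration only).
type Page struct {
	Objects     []string
	IsTruncated bool
	NextMarker  string
}

// listAll pages through a listing the way S3 clients do. If the server
// wrongly reports IsTruncated=false (the bug described above: ILM-skipped
// entries exhaust the drives' max-keys budget), the loop exits early and
// silently misses objects.
func listAll(list func(marker string) (Page, error)) ([]string, error) {
	var all []string
	marker := ""
	for {
		page, err := list(marker)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Objects...)
		if !page.IsTruncated {
			return all, nil // a false negative here drops the remaining objects
		}
		marker = page.NextMarker
	}
}

func main() {
	pages := []Page{
		{Objects: []string{"a", "b"}, IsTruncated: true, NextMarker: "b"},
		{Objects: []string{"c"}},
	}
	i := 0
	all, _ := listAll(func(string) (Page, error) { p := pages[i]; i++; return p, nil })
	fmt.Println(all) // [a b c]
}
```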
Harshavardhana
9ebe168782 add pull requests etiquette 2025-05-25 09:32:03 -07:00
Minio Trusted
ee2028cde6 Update yaml files to latest version RELEASE.2025-05-24T17-08-30Z 2025-05-24 21:37:47 +00:00
Frank Elsinga
ecde75f911 docs: use github-style-notes in the readme (#21308)
use notes in the readme
2025-05-24 10:08:30 -07:00
jiuker
12a6ea89cc fix: Use mime encode for Non-US-ASCII metadata (#21282) 2025-05-22 08:42:54 -07:00
Anis Eleuch
63e102c049 heal: Avoid disabling scanner healing in single and dist erasure mode (#21302)
A typo disabled the scanner healing in erasure mode. Fix it.
2025-05-22 08:42:29 -07:00
Alex
160f8a901b Update Console UI to latest version (#21294) 2025-05-21 08:59:37 -07:00
jiuker
ef9b03fbf5 fix: unable to get net.Interface cause panic (#21277) 2025-05-16 07:28:04 -07:00
Andreas Auernhammer
1d50cae43d remove support for FIPS 140-2 with boringcrypto (#21292)
This commit removes FIPS 140-2 related code for the following
reasons:
 - FIPS 140-2 is a compliance requirement, not a security requirement. Being
   FIPS 140-2 compliant has no security implication on its own.
   From a technical perspective, a FIPS 140-2 compliant implementation
   is not necessarily secure and a non-FIPS 140-2 compliant implementation
   is not necessarily insecure. It depends on the concrete design and
   crypto primitives/constructions used.
 - The boringcrypto branch used to achieve FIPS 140-2 compliance was never
   officially supported by the Go team and is now in maintenance mode.
   It is replaced by a built-in FIPS 140-3 module. It will be removed
   eventually. Ref: https://github.com/golang/go/issues/69536
 - FIPS 140-2 modules are no longer re-certified after Sep. 2026.
   Ref: https://csrc.nist.gov/projects/cryptographic-module-validation-program

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2025-05-16 07:27:42 -07:00
Klaus Post
c0a33952c6 Allow FTPS to force TLS (#21251)
Fixes #21249

Example params: `-ftp=force-tls=true -ftp="tls-private-key=ftp/private.key" -ftp="tls-public-cert=ftp/public.crt"`

If MinIO is set up for TLS those certs will be used.
2025-05-09 13:10:19 -07:00
Alex
8cad40a483 Update UI console to the latest version (#21278)
Signed-off-by: Benjamin Perez <benjamin@bexsoft.net>
2025-05-09 13:09:54 -07:00
Harshavardhana
6d18dba9a2 return error for AppendObject() API (#21272) 2025-05-07 08:37:12 -07:00
74 changed files with 606 additions and 427 deletions

View File

@@ -1,59 +0,0 @@
name: FIPS Build Test
on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build:
    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.24.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Setup dockerfile for build test
        run: |
          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
          echo Detected go version $GO_VERSION
          cat > Dockerfile.fips.test <<EOF
          FROM golang:${GO_VERSION}
          COPY . /minio
          WORKDIR /minio
          ENV GOEXPERIMENT=boringcrypto
          RUN make
          EOF
      - name: Build
        uses: docker/build-push-action@v3
        with:
          context: .
          file: Dockerfile.fips.test
          push: false
          load: true
          tags: minio/fips-test:latest
      # This should fail if grep returns non-zero exit
      - name: Test binary
        run: |
          docker run --rm minio/fips-test:latest ./minio --version
          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'
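
The deleted workflow asserted a FIPS build by grepping the binary's symbol table for BoringCrypto symbols. For reference, the standard library exposes the same check at runtime through `crypto/boring`; a minimal sketch (it reports true only when built with `GOEXPERIMENT=boringcrypto` on a supported platform):

```go
package main

import (
	"crypto/boring"
	"fmt"
)

func main() {
	// Enabled reports whether BoringCrypto handles supported crypto
	// operations; false for ordinary (non-boringcrypto) builds.
	fmt.Println("boringcrypto enabled:", boring.Enabled())
}
```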

View File

@@ -0,0 +1,93 @@
# MinIO Pull Request Guidelines
These guidelines ensure high-quality commits in MinIO's GitHub repositories, maintaining
a clear, valuable commit history for our open-source projects. They apply to all contributors,
fostering efficient reviews and robust code.
## Why Pull Requests?
Pull Requests (PRs) drive quality in MinIO's codebase by:
- Enabling peer review without pair programming.
- Documenting changes for future reference.
- Ensuring commits tell a clear story of development.
**A poor commit lasts forever, even if code is refactored.**
## Crafting a Quality PR
A strong MinIO PR:
- Delivers a complete, valuable change (feature, bug fix, or improvement).
- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`).
- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”).
- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work.
- Adheres to MinIO's coding standards (e.g., Go style, error handling, testing).
PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones.
## Submitting PRs
1. **Title and Summary**:
- Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`).
- Include context in the summary: what changed, why, and any issue references.
- Use `[WIP]` for in-progress PRs to avoid premature merging, or use GitHub draft PRs.
2. **Commits**:
- Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”).
- Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch.
- Keep PRs focused—one feature or fix. Split large changes into multiple PRs.
3. **Testing**:
- Include unit tests for new functionality or bug fixes.
- Ensure existing tests pass (`make test`).
- Document testing steps in the PR summary if manual testing was performed.
4. **Before Submitting**:
- Run `make verify` to check formatting, linting, and tests.
- Reference related issues (e.g., “Closes #1234”).
- Notify team members via GitHub `@mentions` if urgent or complex.
## Reviewing PRs
Reviewers ensure MinIO's commit history remains a clear, reliable record. Responsibilities include:
1. **Commit Quality**:
- Verify each commit explains *why* the change was made (e.g., “So that…”).
- Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”).
2. **Code Quality**:
- Check adherence to MinIO's Go standards (e.g., error handling, documentation).
- Ensure tests cover new code and pass CI.
- Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues.
3. **Flow**:
- Review promptly to avoid blocking progress.
- Balance quality and speed—minor issues can be addressed later via issues, not PR blocks.
- If unable to complete the review, tag another reviewer (e.g., `@username please take over`).
4. **Shared Responsibility**:
- All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate.
- Multiple reviewers are encouraged for complex PRs.
5. **No Self-Edits**:
- Don't modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR.
- If you edit, you're a collaborator, not a reviewer, and cannot merge.
6. **Testing**:
- Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”).
- Reject untested PRs unless testing is infeasible, then assist with test setup.
## Tips for Success
- **Small PRs**: Easier to review, faster to merge. Split large changes logically.
- **Clear Commits**: Use `git rebase -i` to refine history before submitting.
- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding.
- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving.
- **Learn from Reviews**: Use feedback to improve future contributions.
## Resources
- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/)
- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request)
By following these guidelines, we ensure MinIO's codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding!

View File

@@ -1,7 +0,0 @@
# MinIO FIPS Builds
MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.

View File

@@ -4,7 +4,13 @@
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license. Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance.
🔹 S3 API Compatible - Seamless integration with existing S3 tools
🔹 Built for AI & Analytics - Optimized for large-scale data pipelines
🔹 High Performance - Ideal for demanding storage workloads
To learn more, see the [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
@@ -34,7 +40,9 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
> [!NOTE]
> To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option.
> For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
## macOS
@@ -51,7 +59,8 @@ brew install minio/stable/minio
minio server /data
```
> NOTE: If you previously installed minio using `brew install minio`, then it is recommended that you reinstall minio from the official `minio/stable/minio` repo instead.
> [!NOTE]
> If you previously installed minio using `brew install minio`, then it is recommended that you reinstall minio from the official `minio/stable/minio` repo instead.
```sh
brew uninstall minio
@@ -98,7 +107,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Microsoft Windows
@@ -118,7 +128,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Install from Source
@@ -132,7 +143,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require deploying MinIO in distributed mode with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
@@ -170,7 +182,8 @@ This command gets the active zone(s). Now, apply port rules to the relevant zone
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
Note that `permanent` makes sure the rules are persistent across firewall start, restart, or reload. Finally, reload the firewall for the changes to take effect.
> [!NOTE]
> `permanent` makes sure the rules are persistent across firewall start, restart, or reload. Finally, reload the firewall for the changes to take effect.
```sh
firewall-cmd --reload
@@ -199,7 +212,8 @@ service iptables restart
MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> NOTE: MinIO runs the console on a random port by default; if you wish to choose a specific port, use `--console-address` to pick a specific interface and port.
> [!NOTE]
> MinIO runs the console on a random port by default; if you wish to choose a specific port, use `--console-address` to pick a specific interface and port.
### Things to consider
@@ -221,7 +235,8 @@ For example, consider a MinIO deployment behind a proxy `https://minio.example.n
Upgrades require zero downtime in MinIO; all upgrades are non-disruptive, and all transactions on MinIO are atomic. So upgrading all the servers simultaneously is the recommended way to upgrade MinIO.
> NOTE: Updating directly from <https://dl.min.io> requires internet access; optionally, you can host any mirrors at <https://my-artifactory.example.com/minio/>
> [!NOTE]
> Updating directly from <https://dl.min.io> requires internet access; optionally, you can host any mirrors at <https://my-artifactory.example.com/minio/>
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)

View File

@@ -74,11 +74,11 @@ check_minimum_version() {
assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
exit 1
;;
esac

View File

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
}
function _build() {

View File

@@ -23,6 +23,7 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"mime"
"net/http"
"strconv"
"strings"
@@ -168,6 +169,32 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue
}
// check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
// For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
// funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
// example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
// of =?UTF-8?B?w7Y=?=. This mirrors errors like the ä½ in another string.
//
// S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
// (quoted-printable) for mostly ASCII strings. Long strings are split at word
// boundaries to fit RFC 2047's 75-character limit, ensuring HTTP parser
// compatibility.
//
// However, this splitting increases header size and can introduce errors, unlike Go's
// mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
// avoiding S3's heuristic-driven issues.
//
// For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
// report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
// compatibility.
if needsMimeEncoding(v) {
// see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
v = mime.BEncoding.Encode("UTF-8", v)
} else {
v = mime.QEncoding.Encode("UTF-8", v)
}
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
@@ -229,3 +256,14 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
return nil
}
// needsMimeEncoding reports whether s contains any bytes that need to be encoded.
// see mime.needsEncoding
func needsMimeEncoding(s string) bool {
for _, b := range s {
if (b < ' ' || b > '~') && b != '\t' {
return true
}
}
return false
}
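
The comment in this hunk recommends decoding with `mime.WordDecoder`. As a minimal round-trip illustration with Go's standard `mime` package (standalone, not MinIO's handler code):

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// B-encoding (Base64), as chosen above for values containing
	// special characters; "ö" becomes the correct =?UTF-8?b?w7Y=?=.
	fmt.Println(mime.BEncoding.Encode("UTF-8", "ö"))

	// Q-encoding (quoted-printable), chosen for mostly-ASCII values.
	encoded := mime.QEncoding.Encode("UTF-8", "ÄMÄZÕÑ S3")
	fmt.Println(encoded)

	// Decode on the read path with mime.WordDecoder, as the comment suggests.
	var dec mime.WordDecoder
	decoded, err := dec.DecodeHeader(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded) // ÄMÄZÕÑ S3
}
```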

View File

@@ -387,6 +387,11 @@ func registerAPIRouter(router *mux.Router) {
HeadersRegexp(xhttp.AmzSnowballExtract, "true").
HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))
// AppendObject to be rejected
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzWriteOffsetBytes, "").
HandlerFunc(s3APIMiddleware(errorResponseHandler))
// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))
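
The rejection works purely through header-based routing: a more specific route matching any PUT that carries the write-offset header is registered before the generic PutObject route, so it wins. A minimal sketch with gorilla/mux (MinIO uses a compatible router fork; the handler bodies are stand-ins, not MinIO's errorResponseHandler):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// An empty pattern in HeadersRegexp matches on header presence,
	// so any PUT with x-amz-write-offset-bytes is rejected here.
	r.Methods(http.MethodPut).Path("/{object:.+}").
		HeadersRegexp("x-amz-write-offset-bytes", "").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			http.Error(w, "NotImplemented", http.StatusNotImplemented)
		})

	// Generic PutObject route, matched when the header is absent.
	r.Methods(http.MethodPut).Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusOK)
		})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```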

View File

@@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
// - Force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
for i := range len(s) {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
@@ -70,7 +70,7 @@ func s3URLEncode(s string) string {
if hexCount == 0 {
copy(t, s)
for i := 0; i < len(s); i++ {
for i := range len(s) {
if s[i] == ' ' {
t[i] = '+'
}
@@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
}
j := 0
for i := 0; i < len(s); i++ {
for i := range len(s) {
switch c := s[i]; {
case c == ' ':
t[j] = '+'
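
This and several later hunks make the same mechanical change: the classic three-clause loop becomes Go 1.22's range-over-int form. The two loops below are equivalent; the new form just requires `go >= 1.22` in go.mod:

```go
package main

import "fmt"

func main() {
	// Classic three-clause form, as removed throughout this compare.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// Go 1.22+ range-over-int form: iterates i = 0, 1, 2, with the
	// loop variable freshly scoped on each iteration.
	for i := range 3 {
		fmt.Println(i)
	}
}
```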

View File

@@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
for range globalBackgroundHealRoutine.workers {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}

View File

@@ -248,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
pInfo PartInfo
)
for i := 0; i < partsCount; i++ {
for i := range partsCount {
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,

View File

@@ -1089,6 +1089,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
break
}
// check if have a file
if reader == nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
if keyName, ok := formValues["Key"]; !ok {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))

View File

@@ -38,7 +38,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
@@ -556,7 +555,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
if err != nil {
return output, metabytes, err
}
@@ -590,6 +589,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
}
outbuf := bytes.NewBuffer(nil)
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
return outbuf.Bytes(), err
}

View File

@@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
sys.RUnlock()
// send last n console log messages in order filtered by node
if cnt > 0 {
for i := 0; i < last; i++ {
for i := range last {
entry := lastN[(cnt+i)%last]
if (entry == log.Info{}) {
continue

View File

@@ -332,7 +332,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, c
}
var skipHeal atomic.Bool
if globalIsErasure || cache.Info.SkipHealing {
if !globalIsErasure || cache.Info.SkipHealing {
skipHeal.Store(true)
}

View File

@@ -37,7 +37,6 @@ import (
"github.com/minio/kms-go/kes"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
@@ -427,7 +426,7 @@ func newEncryptReader(ctx context.Context, content io.Reader, kind crypto.Type,
return nil, crypto.ObjectKey{}, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
if err != nil {
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
}
@@ -570,7 +569,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
reader, err := sio.DecryptReader(client, sio.Config{
Key: objectEncryptionKey,
SequenceNumber: seqNumber,
CipherSuites: fips.DARECiphers(),
})
if err != nil {
return nil, crypto.ErrInvalidCustomerKey
@@ -1062,7 +1060,7 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte(baseKey))
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil)}); err != nil {
logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key"))
}
return buffer.Bytes()
@@ -1085,7 +1083,7 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
}
mac := hmac.New(sha256.New, key)
mac.Write([]byte(baseKey))
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil)})
}
}

View File

@@ -1481,7 +1481,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
}
}
for i := 0; i < len(onlineDisks); i++ {
for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk

View File

@@ -504,7 +504,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
// count the number of offline disks
offline := 0
for i := 0; i < len(errs); i++ {
for i := range len(errs) {
var found bool
switch {
case errors.Is(errs[i], errDiskNotFound):
@@ -1221,7 +1221,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
partsMetadata[index].SetInlineData()
}
for i := 0; i < len(onlineDisks); i++ {
for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
@@ -1557,7 +1557,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
for i := 0; i < len(onlineDisks); i++ {
for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
@@ -1574,7 +1574,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
if len(versions) == 0 {
// Whether a disk was initially or becomes offline
// during this upload, send it to the MRF list.
for i := 0; i < len(onlineDisks); i++ {
for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
continue
}

View File

@@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int {
if z.rebalMeta == nil {
return 0
}
for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
for i := range len(z.rebalMeta.PoolStats) {
if i == index {
return index
}

View File

@@ -1530,10 +1530,8 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre
}
if loi.IsTruncated && merged.lastSkippedEntry > loi.NextMarker {
// An object hidden by ILM was found during a truncated listing. Since the number of entries
// fetched from drives is limited by max-keys, we should use the last ILM filtered entry
// as a continuation token if it is lexially higher than the last visible object so that the
// next call of WalkDir() with the max-keys can reach new objects not seen previously.
// An object hidden by ILM was found during a truncated listing. Set the next marker
// to the last skipped entry if it is lexically higher than loi.NextMarker, as an optimization.
loi.NextMarker = merged.lastSkippedEntry
}
@@ -1711,7 +1709,9 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
}
z.mpCache.Range(func(_ string, mp MultipartInfo) bool {
poolResult.Uploads = append(poolResult.Uploads, mp)
if mp.Bucket == bucket {
poolResult.Uploads = append(poolResult.Uploads, mp)
}
return true
})
sort.Slice(poolResult.Uploads, func(i int, j int) bool {

View File

@@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
s.erasureDisksMu.RLock()
defer s.erasureDisksMu.RUnlock()
for i := 0; i < s.setCount; i++ {
for i := range s.setCount {
for j := 0; j < s.setDriveCount; j++ {
disk := s.erasureDisks[i][j]
if disk == OfflineDisk {
@@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
if diskID == offlineDiskUUID {
return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
}
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
for i := range len(refFormat.Erasure.Sets) {
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
if refFormat.Erasure.Sets[i][j] == diskID {
return i, j, nil
@@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
}
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
for i := range len(refFormat.Erasure.Sets) {
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
return i, j, nil
@@ -377,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
mutex := newNSLock(globalIsDistErasure)
for i := 0; i < setCount; i++ {
for i := range setCount {
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
}
@@ -390,7 +390,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
var wg sync.WaitGroup
var lk sync.Mutex
for i := 0; i < setCount; i++ {
for i := range setCount {
lockerEpSet := set.NewStringSet()
for j := 0; j < setDriveCount; j++ {
wg.Add(1)
@@ -409,7 +409,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
}
wg.Wait()
for i := 0; i < setCount; i++ {
for i := range setCount {
wg.Add(1)
go func(i int) {
defer wg.Done()

View File

@@ -98,7 +98,7 @@ func fmtGenMain(ctxt *cli.Context) {
setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
format := newFormatErasureV3(setCount, setDriveCount)
format.ID = deploymentID
for i := 0; i < setCount; i++ { // for each erasure set
for i := range setCount { // for each erasure set
for j := 0; j < setDriveCount; j++ {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]

View File

@@ -157,7 +157,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3
format.Erasure.Sets = make([][]string, numSets)
for i := 0; i < numSets; i++ {
for i := range numSets {
format.Erasure.Sets[i] = make([]string, setLen)
for j := 0; j < setLen; j++ {
format.Erasure.Sets[i][j] = mustGetUUID()
@@ -514,7 +514,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
}
// Make sure that the diskID is found in the set.
for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
for i := range len(tmpFormat.Erasure.Sets) {
for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
if this == tmpFormat.Erasure.Sets[i][j] {
return nil
@@ -639,7 +639,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
return nil, err
}
for i := 0; i < setCount; i++ {
for i := range setCount {
hostCount := make(map[string]int, setDriveCount)
for j := 0; j < setDriveCount; j++ {
disk := storageDisks[i*setDriveCount+j]

View File

@@ -75,6 +75,7 @@ func startFTPServer(args []string) {
portRange string
tlsPrivateKey string
tlsPublicCert string
forceTLS bool
)
var err error
@@ -103,6 +104,11 @@ func startFTPServer(args []string) {
tlsPrivateKey = tokens[1]
case "tls-public-cert":
tlsPublicCert = tokens[1]
case "force-tls":
forceTLS, err = strconv.ParseBool(tokens[1])
if err != nil {
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s (%v)", arg, err), "unable to start FTP server")
}
}
}
@@ -129,6 +135,10 @@ func startFTPServer(args []string) {
tls := tlsPrivateKey != "" && tlsPublicCert != ""
if forceTLS && !tls {
logger.Fatal(fmt.Errorf("invalid TLS arguments provided. force-tls, but missing private key --ftp=\"tls-private-key=path/to/private.key\""), "unable to start FTP server")
}
name := "MinIO FTP Server"
if tls {
name = "MinIO FTP(Secure) Server"
@@ -147,6 +157,7 @@ func startFTPServer(args []string) {
Logger: &minioLogger{},
PassivePorts: portRange,
PublicIP: publicIP,
ForceTLS: forceTLS,
})
if err != nil {
logger.Fatal(err, "unable to initialize FTP server")

View File

@@ -22,7 +22,7 @@ import (
"crypto/tls"
"sync/atomic"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/grid"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/rest"
@@ -52,8 +52,8 @@ func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error {
newCachedAuthToken(),
&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.TLSCiphers(),
CurvePreferences: fips.TLSCurveIDs(),
CipherSuites: crypto.TLSCiphers(),
CurvePreferences: crypto.TLSCurveIDs(),
}),
Local: local,
Hosts: hosts,
@@ -85,8 +85,8 @@ func initGlobalLockGrid(ctx context.Context, eps EndpointServerPools) error {
newCachedAuthToken(),
&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.TLSCiphers(),
CurvePreferences: fips.TLSCurveIDs(),
CipherSuites: crypto.TLSCiphers(),
CurvePreferences: crypto.TLSCurveIDs(),
}, grid.RouteLockPath),
Local: local,
Hosts: hosts,

View File

@@ -25,6 +25,7 @@ import (
"net/textproto"
"regexp"
"strings"
"sync/atomic"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
@@ -427,9 +428,31 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
default:
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
defer atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
// When we are not running in S3 Express mode, generate appropriate error
// for x-amz-write-offset HEADER specified.
if _, ok := r.Header[xhttp.AmzWriteOffsetBytes]; ok {
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
if ok {
tc.FuncName = "s3.AppendObject"
tc.ResponseRecorder.LogErrBody = true
}
writeErrorResponse(r.Context(), w, getAPIError(ErrNotImplemented), r.URL)
return
}
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
if ok {
tc.FuncName = "s3.ValidRequest"
tc.ResponseRecorder.LogErrBody = true
}
writeErrorResponse(r.Context(), w, APIError{
Code: "BadRequest",
Description: fmt.Sprintf("An error occurred when parsing the HTTP request %s at '%s'",
Description: fmt.Sprintf("An unsupported API call for method: %s at '%s'",
r.Method, r.URL.Path),
HTTPStatusCode: http.StatusBadRequest,
}, r.URL)

View File

@@ -223,7 +223,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
go func(o listPathOptions) {
defer wg.Done()
o.StopDiskAtLimit = true
if o.Lifecycle == nil {
// No filtering ahead, ask drives to stop
// listing exactly at a specific limit.
o.StopDiskAtLimit = true
}
listErr = z.listMerged(listCtx, o, filterCh)
o.debugln("listMerged returned with", listErr)
}(*o)
@@ -422,6 +426,9 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions
go func() {
var returned bool
for entry := range inCh {
if o.shouldSkip(ctx, entry) {
continue
}
if !returned {
funcReturnedMu.Lock()
returned = funcReturned

View File

@@ -174,6 +174,31 @@ func (o *listPathOptions) debugln(data ...interface{}) {
}
}
func (o *listPathOptions) shouldSkip(ctx context.Context, entry metaCacheEntry) (yes bool) {
if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
return true
}
if o.Marker != "" && entry.name < o.Marker {
return true
}
if !strings.HasPrefix(entry.name, o.Prefix) {
return true
}
if o.Separator != "" && entry.isDir() && !strings.Contains(strings.TrimPrefix(entry.name, o.Prefix), o.Separator) {
return true
}
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
return true
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
return true
}
if o.Lifecycle != nil || o.Replication.Config != nil {
return triggerExpiryAndRepl(ctx, *o, entry)
}
return false
}
// gatherResults will collect all results on the input channel and filter results according
// to the options or to the current bucket ILM expiry rules.
// Caller should close the channel when done.
@@ -199,27 +224,10 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache
resCh = nil
continue
}
if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
if yes := o.shouldSkip(ctx, entry); yes {
results.lastSkippedEntry = entry.name
continue
}
if o.Marker != "" && entry.name < o.Marker {
continue
}
if !strings.HasPrefix(entry.name, o.Prefix) {
continue
}
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
continue
}
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
continue
}
if o.Lifecycle != nil || o.Replication.Config != nil {
if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped {
results.lastSkippedEntry = entry.name
continue
}
}
if o.Limit > 0 && results.len() >= o.Limit {
// We have enough and we have more.
// Do not return io.EOF

View File

@@ -758,7 +758,7 @@ func (r *metacacheReader) Close() error {
return nil
}
// metacacheBlockWriter collects blocks and provides a callaback to store them.
// metacacheBlockWriter collects blocks and provides a callback to store them.
type metacacheBlockWriter struct {
wg sync.WaitGroup
streamErr error

View File

@@ -19,6 +19,7 @@ package cmd
import (
"context"
"errors"
"io"
"sort"
"strings"
@@ -68,6 +69,7 @@ const (
// WalkDir will traverse a directory and return all entries found.
// On success a sorted meta cache stream will be returned.
// Metadata has data stripped, if any.
// The function tries to quit as soon as the context is canceled to avoid further drive IO
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) {
legacyFS := s.fsType != xfs && s.fsType != ext4
@@ -146,6 +148,13 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
var scanDir func(path string) error
scanDir = func(current string) error {
if contextCanceled(ctx) {
return ctx.Err()
}
if opts.Limit > 0 && objsReturned >= opts.Limit {
return nil
}
// Skip forward, if requested...
sb := bytebufferpool.Get()
defer func() {
@@ -161,12 +170,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
forward = forward[:idx]
}
}
if contextCanceled(ctx) {
return ctx.Err()
}
if opts.Limit > 0 && objsReturned >= opts.Limit {
return nil
}
if s.walkMu != nil {
s.walkMu.Lock()
@@ -197,6 +200,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
// Avoid a bunch of cleanup when joining.
current = strings.Trim(current, SlashSeparator)
for i, entry := range entries {
if contextCanceled(ctx) {
return ctx.Err()
}
if opts.Limit > 0 && objsReturned >= opts.Limit {
return nil
}
@@ -292,15 +298,15 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
}
for _, entry := range entries {
if contextCanceled(ctx) {
return ctx.Err()
}
if opts.Limit > 0 && objsReturned >= opts.Limit {
return nil
}
if entry == "" {
continue
}
if contextCanceled(ctx) {
return ctx.Err()
}
meta := metaCacheEntry{name: pathJoinBuf(sb, current, entry)}
// If directory entry on stack before this, pop it now.
@@ -314,7 +320,10 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
if opts.Recursive {
// Scan folder we found. Should be in correct sort order where we are.
err := scanDir(pop)
if err != nil && !IsErrIgnored(err, context.Canceled) {
if err != nil {
if errors.Is(err, context.Canceled) {
return err
}
internalLogIf(ctx, err)
}
}
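
The pattern behind these hunks is to check the context at the top of every loop and recursion step, so drive IO stops promptly once the caller cancels. A generic sketch of the idea (not MinIO's actual WalkDir):

```go
package main

import (
	"context"
	"fmt"
)

// walk visits entries, aborting as soon as ctx is canceled so that no
// further (potentially expensive) IO is issued.
func walk(ctx context.Context, entries []string, visit func(string) error) error {
	for _, e := range entries {
		// Check before doing any work on each iteration.
		if err := ctx.Err(); err != nil {
			return err // context.Canceled or context.DeadlineExceeded
		}
		if err := visit(e); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the caller giving up immediately
	err := walk(ctx, []string{"a", "b"}, func(e string) error {
		fmt.Println("visiting", e) // never reached
		return nil
	})
	fmt.Println(err) // context canceled
}
```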

View File

@@ -49,61 +49,61 @@ const (
var (
bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes,
"Total number of bytes failed at least once to replicate in the last hour on a bucket",
bucketL)
bucketL, targetArnL)
bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount,
"Total number of objects which failed replication in the last hour on a bucket",
bucketL)
bucketL, targetArnL)
bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes,
"Total number of bytes failed at least once to replicate in the last full minute on a bucket",
bucketL)
bucketL, targetArnL)
bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount,
"Total number of objects which failed replication in the last full minute on a bucket",
bucketL)
bucketL, targetArnL)
bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs,
"Replication latency on a bucket in milliseconds",
bucketL, operationL, rangeL, targetArnL)
bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal,
"Number of DELETE tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures,
"Number of failures in GET requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal,
"Number of GET requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures,
"Number of failures in GET tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal,
"Number of GET tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures,
"Number of failures in HEAD requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal,
"Number of HEAD requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures,
"Number of failures in PUT tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal,
"Number of PUT tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes,
"Total number of bytes replicated to the target",
bucketL)
bucketL, targetArnL)
bucketReplSentCountMD = NewCounterMD(bucketReplSentCount,
"Total number of objects replicated to the target",
bucketL)
bucketL, targetArnL)
bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes,
"Total number of bytes failed at least once to replicate since server start",
bucketL)
bucketL, targetArnL)
bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount,
"Total number of objects which failed replication since server start",
bucketL)
bucketL, targetArnL)
bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures,
"Number of failures in DELETE tagging requests proxied to replication target",
bucketL)
bucketL, targetArnL)
)
// loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics
@@ -121,11 +121,11 @@ func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metric
bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage)
for _, bucket := range buckets {
labels := []string{bucketL, bucket}
if s, ok := bucketReplStats[bucket]; ok {
stats := s.ReplicationStats
if stats.hasReplicationUsage() {
for arn, stat := range stats.Stats {
labels := []string{bucketL, bucket, targetArnL, arn}
m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...)
m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...)
m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...)
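
MinIO's metrics registry is internal, but the effect of this change is the standard labeled-metric pattern: adding targetArn as a label turns one series per bucket into one series per (bucket, target) pair. A sketch with prometheus/client_golang (metric name and ARN are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One gauge, labeled by bucket and target ARN, so each replication
	// target reports its own time series.
	sentBytes := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "minio_bucket_replication_sent_bytes",
			Help: "Total number of bytes replicated to the target",
		},
		[]string{"bucket", "targetArn"},
	)
	prometheus.MustRegister(sentBytes)

	sentBytes.WithLabelValues("mybucket", "arn:minio:replication::example:target").Set(1024)
	fmt.Println("registered per-target replication gauge")
}
```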

View File

@@ -266,7 +266,7 @@ func (m *mrfState) healRoutine(z *erasureServerPools) {
if len(u.Versions) > 0 {
vers := len(u.Versions) / 16
if vers > 0 {
for i := 0; i < vers; i++ {
for i := range vers {
healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan)
}
}

View File

@@ -123,7 +123,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
}
retryCount := g.retryCount
for i := 0; i < retryCount; i++ {
for i := range retryCount {
g.errs[index].Err = nil
if err := f(); err != nil {
g.errs[index].Err = err

View File

@@ -26,6 +26,9 @@ import (
"strconv"
"strings"
"testing"
"time"
"github.com/minio/minio/internal/bucket/lifecycle"
)
func TestListObjectsVersionedFolders(t *testing.T) {
@@ -1929,3 +1932,121 @@ func BenchmarkListObjects(b *testing.B) {
}
}
}
func TestListObjectsWithILM(t *testing.T) {
ExecObjectLayerTest(t, testListObjectsWithILM)
}
func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
// Prepare lifecycle expiration workers
es := newExpiryState(t1.Context(), obj, 0)
globalExpiryState = es
t, _ := t1.(*testing.T)
objContent := "test-content"
objMd5 := md5.Sum([]byte(objContent))
uploads := []struct {
bucket string
expired int
notExpired int
}{
{"test-list-ilm-nothing-expired", 0, 6},
{"test-list-ilm-all-expired", 6, 0},
{"test-list-ilm-all-half-expired", 3, 3},
}
oneWeekAgo := time.Now().Add(-7 * 24 * time.Hour)
lifecycleBytes := []byte(`
<LifecycleConfiguration>
<Rule>
<Status>Enabled</Status>
<Expiration>
<Days>1</Days>
</Expiration>
</Rule>
</LifecycleConfiguration>
`)
lifecycleConfig, err := lifecycle.ParseLifecycleConfig(bytes.NewReader(lifecycleBytes))
if err != nil {
t.Fatal(err)
}
for i, upload := range uploads {
err := obj.MakeBucket(context.Background(), upload.bucket, MakeBucketOptions{})
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
metadata, err := globalBucketMetadataSys.Get(upload.bucket)
if err != nil {
t.Fatal(err)
}
metadata.lifecycleConfig = lifecycleConfig
globalBucketMetadataSys.Set(upload.bucket, metadata)
defer globalBucketMetadataSys.Remove(upload.bucket)
// Upload objects with modtime set to one week ago; these are supposed to be expired by ILM
for range upload.expired {
_, err := obj.PutObject(context.Background(), upload.bucket, randString(32),
mustGetPutObjReader(t,
bytes.NewBufferString(objContent),
int64(len(objContent)),
hex.EncodeToString(objMd5[:]),
""),
ObjectOptions{MTime: oneWeekAgo},
)
if err != nil {
t.Fatal(err)
}
}
// Upload objects with the current time as modtime; these are not expired by ILM
for range upload.notExpired {
_, err := obj.PutObject(context.Background(), upload.bucket, randString(32),
mustGetPutObjReader(t,
bytes.NewBufferString(objContent),
int64(len(objContent)),
hex.EncodeToString(objMd5[:]),
""),
ObjectOptions{},
)
if err != nil {
t.Fatal(err)
}
}
for _, maxKeys := range []int{1, 10, 49} {
// Test ListObjects V2
totalObjs, didRuns := 0, 0
marker := ""
for {
didRuns++
if didRuns > 1000 {
t.Fatal("too many runs")
return
}
result, err := obj.ListObjectsV2(context.Background(), upload.bucket, "", marker, "", maxKeys, false, "")
if err != nil {
t.Fatalf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i, instanceType, err.Error())
}
totalObjs += len(result.Objects)
if !result.IsTruncated {
break
}
if marker != "" && marker == result.NextContinuationToken {
t.Fatalf("infinite loop marker: %s", result.NextContinuationToken)
}
marker = result.NextContinuationToken
}
if totalObjs != upload.notExpired {
t.Fatalf("Test %d: %s: max-keys=%d, %d objects are expected to be seen, but %d found instead (didRuns=%d)",
i+1, instanceType, maxKeys, upload.notExpired, totalObjs, didRuns)
}
}
}
}

View File

@@ -128,7 +128,7 @@ func IsValidBucketName(bucket string) bool {
// 'label' in AWS terminology and if the bucket looks
// like an IP address.
isNotNumber := false
for i := 0; i < len(piece); i++ {
for i := range len(piece) {
switch {
case (piece[i] >= 'a' && piece[i] <= 'z' ||
piece[i] == '-'):
@@ -254,11 +254,11 @@ func concat(ss ...string) string {
}
// create & allocate the memory in advance.
n := 0
for i := 0; i < length; i++ {
for i := range length {
n += len(ss[i])
}
b := make([]byte, 0, n)
for i := 0; i < length; i++ {
for i := range length {
b = append(b, ss[i]...)
}
return unsafe.String(unsafe.SliceData(b), n)

View File

@@ -42,7 +42,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/hash/sha256"
@@ -527,9 +526,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
encReader, err := sio.EncryptReader(reader, sio.Config{
Key: partEncryptionKey[:],
CipherSuites: fips.DARECiphers(),
Nonce: &nonce,
Key: partEncryptionKey[:],
Nonce: &nonce,
})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -825,9 +823,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
copy(nonce[:], tmp[:12])
reader, err = sio.EncryptReader(in, sio.Config{
Key: partEncryptionKey[:],
CipherSuites: fips.DARECiphers(),
Nonce: &nonce,
Key: partEncryptionKey[:],
Nonce: &nonce,
})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

View File

@@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) {
func setupTestReadDirFiles(t *testing.T) (testResults []result) {
dir := t.TempDir()
entries := []string{}
for i := 0; i < 10; i++ {
for i := range 10 {
name := fmt.Sprintf("file-%d", i)
if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
@@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
t.Fatalf("Unable to create prefix directory \"mydir\", %s", err)
}
entries := []string{"mydir/"}
for i := 0; i < 10; i++ {
for i := range 10 {
name := fmt.Sprintf("file-%d", i)
if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
@@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
}
dir := t.TempDir()
entries := []string{}
for i := 0; i < 10; i++ {
for i := range 10 {
name1 := fmt.Sprintf("file-%d", i)
name2 := fmt.Sprintf("file-%d", i+10)
if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {

View File

@@ -452,7 +452,9 @@ func initAllSubsystems(ctx context.Context) {
globalNotificationSys = NewNotificationSys(globalEndpoints)
// Create new notification system
globalEventNotifier = NewEventNotifier(GlobalContext)
if globalEventNotifier == nil {
globalEventNotifier = NewEventNotifier(GlobalContext)
}
// Create new bucket metadata system.
if globalBucketMetadataSys == nil {

View File

@@ -102,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT
var totalUploadTimes madmin.TimeDurations
var totalDownloadTimes madmin.TimeDurations
var totalDownloadTTFB madmin.TimeDurations
for i := 0; i < len(throughputHighestResults); i++ {
for i := range len(throughputHighestResults) {
errStr := ""
if throughputHighestResults[i].Error != "" {
errStr = throughputHighestResults[i].Error

View File

@@ -675,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
versions := make([]FileInfoVersions, totalVersions)
decoder := msgpNewReader(r.Body)
defer readMsgpReaderPoolPut(decoder)
-for i := 0; i < totalVersions; i++ {
+for i := range totalVersions {
dst := &versions[i]
if err := dst.DecodeMsg(decoder); err != nil {
s.writeErrorResponse(w, err)

View File

@@ -50,8 +50,13 @@ const (
updateTimeout = 10 * time.Second
)
-// For windows our files have .exe additionally.
-var minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum"
+var (
+// Newer official download info URLs appear earlier below.
+minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum"
+// For windows our files have .exe additionally.
+minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum"
+)
// minioVersionToReleaseTime - parses a standard official release
// MinIO version string.

View File

@@ -1,24 +0,0 @@
//go:build fips
// +build fips
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
// Newer official download info URLs appear earlier below.
var minioReleaseInfoURL = MinioReleaseURL + "minio.fips.sha256sum"

View File

@@ -1,24 +0,0 @@
//go:build !fips
// +build !fips
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
// Newer official download info URLs appear earlier below.
var minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum"

View File

@@ -52,7 +52,7 @@ import (
"github.com/minio/minio/internal/config/api"
xtls "github.com/minio/minio/internal/config/identity/tls"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
@@ -612,8 +612,8 @@ func NewInternodeHTTPTransport(maxIdleConnsPerHost int) func() http.RoundTripper
LookupHost: globalDNSCache.LookupHost,
DialTimeout: rest.DefaultTimeout,
RootCAs: globalRootCAs,
-CipherSuites: fips.TLSCiphers(),
-CurvePreferences: fips.TLSCurveIDs(),
+CipherSuites: crypto.TLSCiphers(),
+CurvePreferences: crypto.TLSCurveIDs(),
EnableHTTP2: false,
TCPOptions: globalTCPOptions,
}.NewInternodeHTTPTransport(maxIdleConnsPerHost)
@@ -626,8 +626,8 @@ func NewHTTPTransportWithClientCerts(clientCert, clientKey string) http.RoundTri
LookupHost: globalDNSCache.LookupHost,
DialTimeout: defaultDialTimeout,
RootCAs: globalRootCAs,
-CipherSuites: fips.TLSCiphersBackwardCompatible(),
-CurvePreferences: fips.TLSCurveIDs(),
+CipherSuites: crypto.TLSCiphersBackwardCompatible(),
+CurvePreferences: crypto.TLSCurveIDs(),
TCPOptions: globalTCPOptions,
EnableHTTP2: false,
}
@@ -665,8 +665,8 @@ func NewHTTPTransportWithTimeout(timeout time.Duration) *http.Transport {
DialTimeout: defaultDialTimeout,
RootCAs: globalRootCAs,
TCPOptions: globalTCPOptions,
-CipherSuites: fips.TLSCiphersBackwardCompatible(),
-CurvePreferences: fips.TLSCurveIDs(),
+CipherSuites: crypto.TLSCiphersBackwardCompatible(),
+CurvePreferences: crypto.TLSCurveIDs(),
EnableHTTP2: false,
}.NewHTTPTransportWithTimeout(timeout)
}
@@ -677,8 +677,8 @@ func NewRemoteTargetHTTPTransport(insecure bool) func() *http.Transport {
return xhttp.ConnSettings{
LookupHost: globalDNSCache.LookupHost,
RootCAs: globalRootCAs,
-CipherSuites: fips.TLSCiphersBackwardCompatible(),
-CurvePreferences: fips.TLSCurveIDs(),
+CipherSuites: crypto.TLSCiphersBackwardCompatible(),
+CurvePreferences: crypto.TLSCurveIDs(),
TCPOptions: globalTCPOptions,
EnableHTTP2: false,
}.NewRemoteTargetHTTPTransport(insecure)
@@ -851,7 +851,7 @@ func lcp(strs []string, pre bool) string {
// compare letters
if pre {
// prefix, iterate left to right
-for i := 0; i < maxl; i++ {
+for i := range maxl {
if xfix[i] != str[i] {
xfix = xfix[:i]
break
@@ -859,7 +859,7 @@ func lcp(strs []string, pre bool) string {
}
} else {
// suffix, iterate right to left
-for i := 0; i < maxl; i++ {
+for i := range maxl {
xi := xfixl - i - 1
si := strl - i - 1
if xfix[xi] != str[si] {
@@ -986,11 +986,11 @@ func newTLSConfig(getCert certs.GetCertificateFunc) *tls.Config {
}
if secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn; secureCiphers {
-tlsConfig.CipherSuites = fips.TLSCiphers()
+tlsConfig.CipherSuites = crypto.TLSCiphers()
} else {
-tlsConfig.CipherSuites = fips.TLSCiphersBackwardCompatible()
+tlsConfig.CipherSuites = crypto.TLSCiphersBackwardCompatible()
}
-tlsConfig.CurvePreferences = fips.TLSCurveIDs()
+tlsConfig.CurvePreferences = crypto.TLSCurveIDs()
return tlsConfig
}

View File

@@ -163,6 +163,7 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) {
Creds: creds,
Secure: u.Scheme == "https",
Transport: globalRemoteTargetTransport,
+Region: conf.Region,
}
client, err := minio.New(u.Host, opts)
if err != nil {

View File
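Note: passing conf.Region through to the minio-go client pins the signing region for the S3 tier target instead of leaving it to endpoint auto-detection (the fix for #21364 noted in the commit log). A hedged sketch of the same client construction with placeholder endpoint and credentials:

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.us-west-2.amazonaws.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
		Region: "us-west-2", // explicit region: skip the bucket-location probe
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("endpoint:", client.EndpointURL())
}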

@@ -846,7 +846,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte,
// Any non-nil error is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
var tHdr, tMeta []byte // Zero copy bytes
-for i := 0; i < versions; i++ {
+for i := range versions {
tHdr, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return err

View File

@@ -2976,7 +2976,7 @@ func (s *xlStorage) RenamePart(ctx context.Context, srcVolume, srcPath, dstVolum
return errFileAccessDenied
}
err = osErrToFileErr(err)
-if errors.Is(err, errFileNotFound) {
+if errors.Is(err, errFileNotFound) || errors.Is(err, errFileAccessDenied) {
return errUploadIDNotFound
}
return err

View File
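Note: the change above widens the error translation in RenamePart so that an access-denied error from the filesystem also surfaces as "upload ID not found" at the API layer. A minimal sketch of the mapping with stand-in error values:

package main

import (
	"errors"
	"fmt"
)

var (
	errFileNotFound     = errors.New("file not found")
	errFileAccessDenied = errors.New("file access denied")
	errUploadIDNotFound = errors.New("upload ID not found")
)

// toUploadErr mirrors the translation in the hunk: both low-level
// failures mean the multipart upload no longer exists for the caller.
func toUploadErr(err error) error {
	if errors.Is(err, errFileNotFound) || errors.Is(err, errFileAccessDenied) {
		return errUploadIDNotFound
	}
	return err
}

func main() {
	fmt.Println(toUploadErr(errFileAccessDenied)) // upload ID not found
}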

@@ -2,7 +2,7 @@ version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
-image: quay.io/minio/minio:RELEASE.2025-04-22T22-12-26Z
+image: quay.io/minio/minio:RELEASE.2025-05-24T17-08-30Z
command: server --console-address ":9001" http://minio{1...4}/data{1...2}
expose:
- "9000"

go.mod
View File

@@ -51,7 +51,7 @@ require (
github.com/lithammer/shortuuid/v4 v4.2.0
github.com/miekg/dns v1.1.65
github.com/minio/cli v1.24.2
-github.com/minio/console v1.7.6
+github.com/minio/console v1.7.7-0.20250516212319-220a55500cc3
github.com/minio/csvparser v1.0.0
github.com/minio/dnscache v0.1.1
github.com/minio/dperf v0.6.3
@@ -212,7 +212,7 @@ require (
github.com/minio/colorjson v1.0.8 // indirect
github.com/minio/crc64nvme v1.0.1 // indirect
github.com/minio/filepath v1.0.0 // indirect
-github.com/minio/mc v0.0.0-20250312172924-c1d5d4cbb4ca // indirect
+github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/websocket v1.6.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect

go.sum
View File

@@ -421,8 +421,8 @@ github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg=
github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg=
github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w=
-github.com/minio/console v1.7.6 h1:E0jq9nYMeW7z4iJtJ6vDt2hk4Jin0zcyAzRcTlaUO44=
-github.com/minio/console v1.7.6/go.mod h1:gM4B3/7sqP17owJaoThmeDkfaDEQNZ1mjGU5DSwRReo=
+github.com/minio/console v1.7.7-0.20250516212319-220a55500cc3 h1:8lBrtntYnTIkiyDkfgPr1/HgpDeGHFpACnG9OVdZFW0=
+github.com/minio/console v1.7.7-0.20250516212319-220a55500cc3/go.mod h1:Jxp/p3RZctdaavbfRrIirQLMPlZ4IFEjInE9lzDtFjI=
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
@@ -442,8 +442,8 @@ github.com/minio/kms-go/kms v0.5.1-0.20250225090116-4e64ce8d0f35 h1:ISNz42SPD+he
github.com/minio/kms-go/kms v0.5.1-0.20250225090116-4e64ce8d0f35/go.mod h1:JFQu2srrnWxMn6KcwS5347oTwNKW7nkewgBlrodjF9k=
github.com/minio/madmin-go/v3 v3.0.109 h1:hRHlJ6yaIB3tlIj5mz9L9mGcyLC37S9qL1WtFrRtyQ0=
github.com/minio/madmin-go/v3 v3.0.109/go.mod h1:WOe2kYmYl1OIlY2DSRHVQ8j1v4OItARQ6jGyQqcCud8=
-github.com/minio/mc v0.0.0-20250312172924-c1d5d4cbb4ca h1:Zeu+Gbsw/yoqJofAFaU3zbIVr51j9LULUrQqKFLQnGA=
-github.com/minio/mc v0.0.0-20250312172924-c1d5d4cbb4ca/go.mod h1:h5UQZ+5Qfq6XV81E4iZSgStPZ6Hy+gMuHMkLkjq4Gys=
+github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9 h1:6RyInOHKL6jz8zxcAar/h6rg/aJCxDP/uFuSNvYSuMI=
+github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9/go.mod h1:h5UQZ+5Qfq6XV81E4iZSgStPZ6Hy+gMuHMkLkjq4Gys=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=

View File

@@ -16,11 +16,51 @@ spec:
ingress:
- ports:
- port: {{ .Values.minioAPIPort }}
protocol: TCP
- port: {{ .Values.minioConsolePort }}
protocol: TCP
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "minio.name" . }}-client: "true"
{{- end }}
{{- if .Values.networkPolicy.egress.enabled }}
egress:
- ports:
{{ .Values.networkPolicy.egress.ports | toJson }}
{{- with .Values.networkPolicy.egress.to }}
to:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
---
kind: NetworkPolicy
apiVersion: {{ template "minio.networkPolicy.apiVersion" . }}
metadata:
name: {{ template "minio.fullname" . }}-post-job
labels:
app: {{ template "minio.name" . }}-post-job
chart: {{ template "minio.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
podSelector:
matchLabels:
app: {{ template "minio.name" . }}-job
release: {{ .Release.Name }}
egress:
- ports:
- port: {{ .Values.minioAPIPort }}
protocol: TCP
- port: {{ .Values.minioConsolePort }}
protocol: TCP
{{- if .Values.networkPolicy.egress.enabled }}
- ports:
{{ .Values.networkPolicy.egress.ports | toJson }}
{{- with .Values.networkPolicy.egress.to }}
to:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -200,9 +200,11 @@ service:
ingress:
enabled: false
ingressClassName: ~
-labels: {}
+labels:
+{}
# node-role.kubernetes.io/ingress: platform
-annotations: {}
+annotations:
+{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
@@ -241,9 +243,11 @@ consoleService:
consoleIngress:
enabled: false
ingressClassName: ~
-labels: {}
+labels:
+{}
# node-role.kubernetes.io/ingress: platform
-annotations: {}
+annotations:
+{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
@@ -391,7 +395,8 @@ makeUserJob:
## List of service accounts to be created after minio install
##
-svcaccts: []
+svcaccts:
+[]
## accessKey, secretKey and parent user to be assigned to the service accounts
## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts
# - accessKey: console-svcacct
@@ -430,7 +435,8 @@ makeServiceAccountJob:
## List of buckets to be created after minio install
##
-buckets: []
+buckets:
+[]
# # Name of the bucket
# - name: bucket1
# # Policy to be set on the
@@ -479,13 +485,15 @@ customCommandJob:
requests:
memory: 128Mi
## Additional volumes to add to the post-job.
-extraVolumes: []
+extraVolumes:
+[]
# - name: extra-policies
# configMap:
# name: my-extra-policies-cm
## Additional volumeMounts to add to the custom commands container when
## running the post-job.
-extraVolumeMounts: []
+extraVolumeMounts:
+[]
# - name: extra-policies
# mountPath: /mnt/extras/
# Command to run after the main command on exit
@@ -542,10 +550,35 @@ networkPolicy:
# Specifies whether the policies created will be standard Network Policies (flavor: kubernetes)
# or Cilium Network Policies (flavor: cilium)
flavor: kubernetes
# allows external access to the minio api
allowExternal: true
## @params networkPolicy.egress configuration of the egress traffic
egress:
## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
## created allowing minio to connect to external data sources from kubernetes cluster.
##
enabled: false
## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
## Add ports to the egress by specifying - port: <port number>
## E.X.
## - port: 80
## - port: 443
## - port: 53
## protocol: UDP
##
ports: []
## @param networkPolicy.egress.to Allow egress traffic to specific destinations
## Add destinations to the egress by specifying - ipBlock: <CIDR>
## E.X.
## to:
## - namespaceSelector:
## matchExpressions:
## - {key: role, operator: In, values: [minio]}
##
to: []
# only when using flavor: cilium
egressEntities:
- kube-apiserver
- kube-apiserver
## PodDisruptionBudget settings
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
@@ -573,7 +606,8 @@ metrics:
# for node metrics
relabelConfigs: {}
# for cluster metrics
-relabelConfigsCluster: {}
+relabelConfigsCluster:
+{}
# metricRelabelings:
# - regex: (server|pod)
# action: labeldrop

View File
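Note: a hypothetical values.yaml fragment exercising the new egress settings documented above; the ports and selector are examples only, not chart defaults:

networkPolicy:
  enabled: true
  flavor: kubernetes
  allowExternal: true
  egress:
    enabled: true
    ports:
      - port: 443
      - port: 53
        protocol: UDP
    to:
      - namespaceSelector:
          matchExpressions:
            - {key: role, operator: In, values: [minio]}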

@@ -27,7 +27,6 @@ import (
"io"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/secure-io/sio-go"
"github.com/secure-io/sio-go/sioutil"
@@ -64,7 +63,7 @@ func DecryptBytes(k *kms.KMS, ciphertext []byte, context kms.Context) ([]byte, e
// ciphertext.
func Encrypt(k *kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) {
algorithm := sio.AES_256_GCM
-if !fips.Enabled && !sioutil.NativeAES() {
+if !sioutil.NativeAES() {
algorithm = sio.ChaCha20Poly1305
}
@@ -145,9 +144,6 @@ func Decrypt(k *kms.KMS, ciphertext io.Reader, associatedData kms.Context) (io.R
if err := json.Unmarshal(metadataBuffer, &metadata); err != nil {
return nil, err
}
-if fips.Enabled && metadata.Algorithm != sio.AES_256_GCM {
-return nil, fmt.Errorf("config: unsupported encryption algorithm: %q is not supported in FIPS mode", metadata.Algorithm)
-}
key, err := k.Decrypt(context.TODO(), &kms.DecryptRequest{
Name: metadata.KeyID,

View File
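Note: after this change the DARE algorithm for config encryption depends only on hardware AES support, not on a FIPS build flag. A minimal sketch of the selection logic using the same secure-io/sio-go helpers:

package main

import (
	"fmt"

	"github.com/secure-io/sio-go"
	"github.com/secure-io/sio-go/sioutil"
)

// pickAlgorithm mirrors the post-change logic: AES-256-GCM on CPUs with
// AES acceleration, ChaCha20-Poly1305 everywhere else.
func pickAlgorithm() sio.Algorithm {
	if sioutil.NativeAES() {
		return sio.AES_256_GCM
	}
	return sio.ChaCha20Poly1305
}

func main() {
	fmt.Println("selected:", pickAlgorithm())
}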

@@ -24,7 +24,7 @@ import (
"time"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/crypto"
"github.com/minio/pkg/v3/env"
xnet "github.com/minio/pkg/v3/net"
clientv3 "go.etcd.io/etcd/client/v3"
@@ -165,8 +165,8 @@ func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
MinVersion: tls.VersionTLS12,
NextProtos: []string{"http/1.1", "h2"},
ClientSessionCache: tls.NewLRUClientSessionCache(64),
-CipherSuites: fips.TLSCiphersBackwardCompatible(),
-CurvePreferences: fips.TLSCurveIDs(),
+CipherSuites: crypto.TLSCiphersBackwardCompatible(),
+CurvePreferences: crypto.TLSCurveIDs(),
}
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html

View File

@@ -26,7 +26,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/crypto"
"github.com/minio/pkg/v3/ldap"
)
@@ -197,7 +197,7 @@ func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) {
MinVersion: tls.VersionTLS12,
NextProtos: []string{"h2", "http/1.1"},
ClientSessionCache: tls.NewLRUClientSessionCache(100),
-CipherSuites: fips.TLSCiphersBackwardCompatible(), // Contains RSA key exchange
+CipherSuites: crypto.TLSCiphersBackwardCompatible(), // Contains RSA key exchange
RootCAs: rootCAs,
},
}

View File

@@ -11,9 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !fips
-// +build !fips
package openid
import (
@@ -22,7 +19,7 @@ import (
"github.com/golang-jwt/jwt/v4"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
_ "golang.org/x/crypto/sha3"
)
// Specific instances for EC256 and company

View File
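Note: the blank sha3 import survives the FIPS cleanup because JWT verification may hash with SHA-3, and the stdlib crypto.Hash values only become Available() once a package registers an implementation. A minimal sketch of that registration effect:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // side effect: registers the SHA3-* hashes
)

func main() {
	// Prints true only because the blank import above ran its init.
	fmt.Println("SHA3-256 available:", crypto.SHA3_256.Available())
}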

@@ -12,9 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !fips
-// +build !fips
package openid
import (
@@ -23,7 +20,7 @@ import (
"github.com/golang-jwt/jwt/v4"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
_ "golang.org/x/crypto/sha3"
)
// Specific instances for RS256 and company

View File

@@ -15,22 +15,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
-// Package fips provides functionality to configure cryptographic
-// implementations compliant with FIPS 140.
-//
-// FIPS 140 [1] is a US standard for data processing that specifies
-// requirements for cryptographic modules. Software that is "FIPS 140
-// compliant" must use approved cryptographic primitives only and that
-// are implemented by a FIPS 140 certified cryptographic module.
-//
-// So, FIPS 140 requires that a certified implementation of e.g. AES
-// is used to implement more high-level cryptographic protocols.
-// It does not require any specific security criteria for those
-// high-level protocols. FIPS 140 focuses only on the implementation
-// and usage of the most low-level cryptographic building blocks.
-//
-// [1]: https://en.wikipedia.org/wiki/FIPS_140
-package fips
+package crypto
import (
"crypto/tls"
@@ -38,40 +23,13 @@ import (
"github.com/minio/sio"
)
-// Enabled indicates whether cryptographic primitives,
-// like AES or SHA-256, are implemented using a FIPS 140
-// certified module.
-//
-// If FIPS-140 is enabled no non-NIST/FIPS approved
-// primitives must be used.
-const Enabled = enabled
// DARECiphers returns a list of supported cipher suites
// for the DARE object encryption.
-func DARECiphers() []byte {
-if Enabled {
-return []byte{sio.AES_256_GCM}
-}
-return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305}
-}
+func DARECiphers() []byte { return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305} }
// TLSCiphers returns a list of supported TLS transport
// cipher suite IDs.
//
// The list contains only ciphers that use AES-GCM or
// (non-FIPS) CHACHA20-POLY1305 and ellitpic curve key
// exchange.
func TLSCiphers() []uint16 {
-if Enabled {
-return []uint16{
-tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
-tls.TLS_AES_256_GCM_SHA384,
-tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2
-tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-}
-}
return []uint16{
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
tls.TLS_AES_128_GCM_SHA256,
@@ -92,24 +50,6 @@ func TLSCiphers() []uint16 {
// ciphers for backward compatibility. In particular, AES-CBC
// and non-ECDHE ciphers.
func TLSCiphersBackwardCompatible() []uint16 {
-if Enabled {
-return []uint16{
-tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
-tls.TLS_AES_256_GCM_SHA384,
-tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 ECDHE GCM
-tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // TLS 1.2 ECDHE CBC
-tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
-tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 non-ECDHE
-tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
-tls.TLS_RSA_WITH_AES_128_CBC_SHA,
-tls.TLS_RSA_WITH_AES_256_CBC_SHA,
-}
-}
return []uint16{
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
tls.TLS_AES_128_GCM_SHA256,
@@ -134,10 +74,5 @@ func TLSCiphersBackwardCompatible() []uint16 {
// TLSCurveIDs returns a list of supported elliptic curve IDs
// in preference order.
func TLSCurveIDs() []tls.CurveID {
-var curves []tls.CurveID
-if !Enabled {
-curves = append(curves, tls.X25519) // Only enable X25519 in non-FIPS mode
-}
-curves = append(curves, tls.CurveP256, tls.CurveP384, tls.CurveP521)
-return curves
+return []tls.CurveID{tls.CurveP256, tls.X25519, tls.CurveP384, tls.CurveP521}
}

View File
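Note: the consolidated crypto package now returns a single preference list per helper, with X25519 always offered. A minimal standalone sketch of wiring such helpers into a tls.Config (the function body abbreviates the lists above):

package main

import (
	"crypto/tls"
	"fmt"
)

// tlsCurveIDs mirrors the new TLSCurveIDs: P-256 first, and X25519
// always included now that the FIPS-only path is gone.
func tlsCurveIDs() []tls.CurveID {
	return []tls.CurveID{tls.CurveP256, tls.X25519, tls.CurveP384, tls.CurveP521}
}

func main() {
	cfg := &tls.Config{
		MinVersion:       tls.VersionTLS12,
		CurvePreferences: tlsCurveIDs(),
	}
	fmt.Println("curves configured:", len(cfg.CurvePreferences))
}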

@@ -27,7 +27,6 @@ import (
"io"
"path"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/hash/sha256"
"github.com/minio/minio/internal/logger"
"github.com/minio/sio"
@@ -98,7 +97,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
mac.Sum(sealingKey[:0])
-if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:], CipherSuites: fips.DARECiphers()}); n != 64 || err != nil {
+if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
}
sealedKey := SealedKey{
@@ -123,12 +122,12 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
-unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}
+unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)}
case InsecureSealAlgorithm:
sha := sha256.New()
sha.Write(extKey)
sha.Write(sealedKey.IV[:])
-unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.DARECiphers()}
+unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
}
if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil {
@@ -159,7 +158,7 @@ func (key ObjectKey) SealETag(etag []byte) []byte {
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
-if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
+if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
}
return buffer.Bytes()
@@ -175,5 +174,5 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
}
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte("SSE-etag"))
-return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
+return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)})
}

View File

@@ -24,7 +24,6 @@ import (
"io"
"net/http"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/sio"
@@ -101,7 +100,7 @@ func unsealObjectKey(clientKey []byte, metadata map[string]string, bucket, objec
// EncryptSinglePart encrypts an io.Reader which must be the
// body of a single-part PUT request.
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
-r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:], CipherSuites: fips.DARECiphers()})
+r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:]})
if err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt io.Reader using object key"))
}
@@ -123,7 +122,7 @@ func DecryptSinglePart(w io.Writer, offset, length int64, key ObjectKey) io.Writ
const PayloadSize = 1 << 16 // DARE 2.0
w = ioutil.LimitedWriter(w, offset%PayloadSize, length)
-decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:], CipherSuites: fips.DARECiphers()})
+decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:]})
if err != nil {
logger.CriticalIf(context.Background(), errors.New("Unable to decrypt io.Writer using object key"))
}

View File

@@ -381,7 +381,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int)
lockNotFound, lockRefreshed := 0, 0
done := false
-for i := 0; i < len(restClnts); i++ {
+for range len(restClnts) {
select {
case refreshResult := <-ch:
if refreshResult.offline {

View File

@@ -117,7 +117,6 @@ import (
"strconv"
"strings"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/sio"
@@ -346,8 +345,7 @@ func Decrypt(key []byte, etag ETag) (ETag, error) {
plaintext := make([]byte, 0, 16)
etag, err := sio.DecryptBuffer(plaintext, etag, sio.Config{
-Key: decryptionKey,
-CipherSuites: fips.DARECiphers(),
+Key: decryptionKey,
})
if err != nil {
return nil, err

View File

@@ -357,7 +357,7 @@ func (list *TargetList) startSendWorkers(workerCount int) {
if err != nil {
panic(err)
}
-for i := 0; i < workerCount; i++ {
+for range workerCount {
wk.Take()
go func() {
defer wk.Give()

View File

@@ -1,25 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:build fips && linux && amd64
// +build fips,linux,amd64
package fips
import _ "crypto/tls/fipsonly"
const enabled = true

View File

@@ -1,23 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//go:build !fips
// +build !fips
package fips
const enabled = false

View File

@@ -1041,7 +1041,7 @@ func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel conte
// Handle merged messages.
messages := int(m.Seq)
c.inMessages.Add(int64(messages))
-for i := 0; i < messages; i++ {
+for range messages {
if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected {
return
}

View File

@@ -143,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) {
}
func getHosts(n int) (hosts []string, listeners []net.Listener, err error) {
-for i := 0; i < n; i++ {
+for range n {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {

View File

@@ -574,7 +574,7 @@ func (m *muxClient) ack(seq uint32) {
return
}
available := cap(m.outBlock)
-for i := 0; i < available; i++ {
+for range available {
m.outBlock <- struct{}{}
}
m.acked = true

View File

@@ -130,7 +130,7 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea
// Fill outbound block.
// Each token represents a message that can be sent to the client without blocking.
// The client will refill the tokens as they confirm delivery of the messages.
-for i := 0; i < outboundCap; i++ {
+for range outboundCap {
m.outBlock <- struct{}{}
}

View File

@@ -181,6 +181,9 @@ const (
AmzChecksumTypeFullObject = "FULL_OBJECT"
AmzChecksumTypeComposite = "COMPOSITE"
+// S3 Express API related constant reject it.
+AmzWriteOffsetBytes = "x-amz-write-offset-bytes"
// Post Policy related
AmzMetaUUID = "X-Amz-Meta-Uuid"
AmzMetaName = "X-Amz-Meta-Name"

View File
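Note: the new constant names the S3 Express append-offset header so handlers can detect and refuse it. A hypothetical sketch of such a check (not MinIO's actual handler):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const amzWriteOffsetBytes = "x-amz-write-offset-bytes"

// rejectWriteOffset refuses requests using the S3 Express offset-write
// extension; it returns true when the request was rejected.
func rejectWriteOffset(w http.ResponseWriter, r *http.Request) bool {
	if r.Header.Get(amzWriteOffsetBytes) != "" {
		http.Error(w, "write-offset requests are not supported", http.StatusNotImplemented)
		return true
	}
	return false
}

func main() {
	req := httptest.NewRequest(http.MethodPut, "/bucket/object", nil)
	req.Header.Set(amzWriteOffsetBytes, "1024")
	rec := httptest.NewRecorder()
	fmt.Println(rejectWriteOffset(rec, req), rec.Code) // true 501
}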

@@ -75,7 +75,10 @@ var matchingFuncNames = [...]string{
var (
quietFlag, jsonFlag, anonFlag bool
// Custom function to format error
-errorFmtFunc func(string, error, bool) string
+// can be registered by RegisterError
+errorFmtFunc = func(introMsg string, err error, jsonFlag bool) string {
+return fmt.Sprintf("msg: %s\n err:%s", introMsg, err)
+}
)
// EnableQuiet - turns quiet option on.

View File
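Note: initializing errorFmtFunc with a fallback means logging no longer panics on a nil function value when RegisterError was never called. A minimal sketch of the default-plus-override pattern:

package main

import "fmt"

// errorFmtFunc has a safe default; RegisterError may replace it.
var errorFmtFunc = func(introMsg string, err error, jsonFlag bool) string {
	return fmt.Sprintf("msg: %s\n err:%s", introMsg, err)
}

// RegisterError installs a custom formatter, as the comment in the
// hunk above describes.
func RegisterError(fn func(string, error, bool) string) {
	errorFmtFunc = fn
}

func main() {
	fmt.Println(errorFmtFunc("startup failed", fmt.Errorf("disk offline"), false))
}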

@@ -230,7 +230,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error {
}()
// Start parsers
-for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+for range runtime.GOMAXPROCS(0) {
go func() {
for in := range r.input {
if len(in.input) == 0 {

View File
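Note: both CSV readers fan out one parser goroutine per available CPU, all draining a shared input channel. A minimal standalone sketch of that pattern (the payload type is illustrative):

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	input := make(chan []byte)
	var wg sync.WaitGroup
	for range runtime.GOMAXPROCS(0) { // one parser per usable CPU
		wg.Add(1)
		go func() {
			defer wg.Done()
			for in := range input {
				_ = in // a real parser would decode records here
			}
		}()
	}
	for i := range 8 {
		input <- []byte(fmt.Sprintf("row-%d", i))
	}
	close(input)
	wg.Wait()
	fmt.Println("all rows consumed")
}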

@@ -173,7 +173,7 @@ func (r *PReader) startReaders() {
}()
// Start parsers
-for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+for range runtime.GOMAXPROCS(0) {
go func() {
for in := range r.input {
if len(in.input) == 0 {

View File

@@ -332,7 +332,7 @@ func (d *Decoder) u4() rune {
// logic taken from:
// github.com/buger/jsonparser/blob/master/escape.go#L20
var h [4]int
-for i := 0; i < 4; i++ {
+for i := range 4 {
c := d.next()
switch {
case c >= '0' && c <= '9':