Compare commits


1 Commit

Author: Andreas Auernhammer
SHA1: 01cb705c36
Message: crypto: add support for KMS key versions
This commit adds support for KMS master key versions.
Now, MinIO stores any key version information returned by the
KMS as part of the object metadata. The key version identifies
a particular master key within a master key ring. When encrypting/
generating a DEK, MinIO has to remember the key version - similar to
the key name. When decrypting a DEK, MinIO sends the key version to
the KMS such that the KMS can identify the exact key version that
should be used to decrypt the object.

Existing objects don't have a key version. Hence, this field will
be empty.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
Date: 2025-05-05 22:35:43 +02:00
508 changed files with 4037 additions and 4810 deletions
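For orientation, the sketch below illustrates the flow described in the commit message: the key version returned by the KMS when a DEK is generated is stored next to the key name in object metadata, and both are handed back to the KMS when the DEK is later decrypted. The type and metadata-key names are hypothetical, not MinIO's actual internals.

```go
// Hypothetical sketch - illustrative names only, not MinIO's real types or
// metadata keys.
package kmsdemo

// KMS stands in for a key-management client that supports master key versions.
type KMS interface {
	// GenerateKey returns a plaintext DEK, its sealed form, and the version
	// of the master key that sealed it.
	GenerateKey(keyName string) (plainDEK, sealedDEK []byte, keyVersion string, err error)
	// DecryptKey unseals a DEK; the key version tells the KMS exactly which
	// master key version to use.
	DecryptKey(keyName, keyVersion string, sealedDEK []byte) (plainDEK []byte, err error)
}

// Illustrative metadata keys.
const (
	metaKeyName    = "x-internal-kms-key-id"
	metaKeyVersion = "x-internal-kms-key-version"
	metaSealedKey  = "x-internal-kms-sealed-key"
)

// sealObjectKey generates a DEK and records the key name, key version, and
// sealed DEK in the object's metadata.
func sealObjectKey(kms KMS, keyName string, meta map[string]string) ([]byte, error) {
	plain, sealed, version, err := kms.GenerateKey(keyName)
	if err != nil {
		return nil, err
	}
	meta[metaKeyName] = keyName
	meta[metaKeyVersion] = version // empty for objects created before key versions existed
	meta[metaSealedKey] = string(sealed)
	return plain, nil
}

// unsealObjectKey sends the stored key name and version back to the KMS so it
// can select the exact master key version that sealed this DEK.
func unsealObjectKey(kms KMS, meta map[string]string) ([]byte, error) {
	return kms.DecryptKey(meta[metaKeyName], meta[metaKeyVersion], []byte(meta[metaSealedKey]))
}
```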

View File

@@ -1,19 +1,14 @@
---
name: Bug report
about: Report a bug in MinIO (community edition is source-only)
about: Create a report to help us improve
title: ''
labels: community, triage
assignees: ''
---
## IMPORTANT NOTES
**Community Edition**: MinIO community edition is now source-only. Install via `go install github.com/minio/minio@latest`
**Feature Requests**: We are no longer accepting feature requests for the community edition. For feature requests and enterprise support, please subscribe to [MinIO Enterprise Support](https://min.io/pricing).
**Urgent Issues**: If this case is urgent or affects production, please subscribe to [SUBNET](https://min.io/pricing) for 24/7 enterprise support.
## NOTE
If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
<!--- Provide a general summary of the issue in the Title above -->

View File

@@ -2,7 +2,7 @@ blank_issues_enabled: false
contact_links:
- name: MinIO Community Support
url: https://slack.min.io
about: Community support via Slack - for questions and discussions
- name: MinIO Enterprise Support (SUBNET)
about: Join here for Community Support
- name: MinIO SUBNET Support
url: https://min.io/pricing
about: Enterprise support with SLA - for production deployments and feature requests
about: Join here for Enterprise Support

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

59
.github/workflows/go-fips.yml vendored Normal file
View File

@@ -0,0 +1,59 @@
name: FIPS Build Test
on:
  pull_request:
    branches:
      - master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true
permissions:
  contents: read
jobs:
  build:
    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.24.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Setup dockerfile for build test
        run: |
          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
          echo Detected go version $GO_VERSION
          cat > Dockerfile.fips.test <<EOF
          FROM golang:${GO_VERSION}
          COPY . /minio
          WORKDIR /minio
          ENV GOEXPERIMENT=boringcrypto
          RUN make
          EOF
      - name: Build
        uses: docker/build-push-action@v3
        with:
          context: .
          file: Dockerfile.fips.test
          push: false
          load: true
          tags: minio/fips-test:latest
      # This should fail if grep returns non-zero exit
      - name: Test binary
        run: |
          docker run --rm minio/fips-test:latest ./minio --version
          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'

View File

@@ -1,14 +1,8 @@
FROM minio/minio:latest
ARG TARGETARCH
ARG RELEASE
RUN chmod -R 777 /usr/bin
COPY ./minio-${TARGETARCH}.${RELEASE} /usr/bin/minio
COPY ./minio-${TARGETARCH}.${RELEASE}.minisig /usr/bin/minio.minisig
COPY ./minio-${TARGETARCH}.${RELEASE}.sha256sum /usr/bin/minio.sha256sum
COPY ./minio /usr/bin/minio
COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

View File

@@ -1,93 +0,0 @@
# MinIO Pull Request Guidelines
These guidelines ensure high-quality commits in MinIO's GitHub repositories, maintaining
a clear, valuable commit history for our open-source projects. They apply to all contributors,
fostering efficient reviews and robust code.
## Why Pull Requests?
Pull Requests (PRs) drive quality in MinIO's codebase by:
- Enabling peer review without pair programming.
- Documenting changes for future reference.
- Ensuring commits tell a clear story of development.
**A poor commit lasts forever, even if code is refactored.**
## Crafting a Quality PR
A strong MinIO PR:
- Delivers a complete, valuable change (feature, bug fix, or improvement).
- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`).
- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”).
- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work.
- Adheres to MinIO's coding standards (e.g., Go style, error handling, testing).
PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones.
## Submitting PRs
1. **Title and Summary**:
- Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`).
- Include context in the summary: what changed, why, and any issue references.
- Use `[WIP]` for in-progress PRs to avoid premature merging or choose GitHub draft PRs.
2. **Commits**:
- Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”).
- Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch.
- Keep PRs focused—one feature or fix. Split large changes into multiple PRs.
3. **Testing**:
- Include unit tests for new functionality or bug fixes.
- Ensure existing tests pass (`make test`).
- Document testing steps in the PR summary if manual testing was performed.
4. **Before Submitting**:
- Run `make verify` to check formatting, linting, and tests.
- Reference related issues (e.g., “Closes #1234”).
- Notify team members via GitHub `@mentions` if urgent or complex.
## Reviewing PRs
Reviewers ensure MinIO's commit history remains a clear, reliable record. Responsibilities include:
1. **Commit Quality**:
- Verify each commit explains *why* the change was made (e.g., “So that…”).
- Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”).
2. **Code Quality**:
- Check adherence to MinIO's Go standards (e.g., error handling, documentation).
- Ensure tests cover new code and pass CI.
- Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues.
3. **Flow**:
- Review promptly to avoid blocking progress.
- Balance quality and speed—minor issues can be addressed later via issues, not PR blocks.
- If unable to complete the review, tag another reviewer (e.g., `@username please take over`).
4. **Shared Responsibility**:
- All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate.
- Multiple reviewers are encouraged for complex PRs.
5. **No Self-Edits**:
- Don't modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR.
- If you edit, you're a collaborator, not a reviewer, and cannot merge.
6. **Testing**:
- Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”).
- Reject untested PRs unless testing is infeasible, then assist with test setup.
## Tips for Success
- **Small PRs**: Easier to review, faster to merge. Split large changes logically.
- **Clear Commits**: Use `git rebase -i` to refine history before submitting.
- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding.
- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving.
- **Learn from Reviews**: Use feedback to improve future contributions.
## Resources
- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/)
- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request)
By following these guidelines, we ensure MinIO's codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding!

7
README.fips.md Normal file
View File

@@ -0,0 +1,7 @@
# MinIO FIPS Builds
MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
MinIO FIPS executables are available at <https://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
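For a quick local spot-check of such a build, assuming a BoringCrypto-enabled Go toolchain (the `GOEXPERIMENT=boringcrypto` route used by the `go-fips.yml` workflow added in this changeset), build the binary and look for FIPS symbols, mirroring the CI test:

```sh
# Sketch: build with BoringCrypto and verify that FIPS symbols are linked in.
GOEXPERIMENT=boringcrypto make
go tool nm ./minio | grep -q FIPS && echo "BoringCrypto/FIPS symbols present"
```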

267
README.md
View File

@@ -4,154 +4,253 @@
[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io)
MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license.
Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance.
MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
- S3 API Compatible: Seamless integration with existing S3 tools
- Built for AI & Analytics: Optimized for large-scale data pipelines
- High Performance: Ideal for demanding storage workloads
This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).
This README provides instructions for building MinIO from source and deploying onto baremetal hardware.
Use the [MinIO Documentation](https://github.com/minio/docs) project to build and host a local copy of the documentation.
## Container Installation
## MinIO is Open Source Software
Use the following commands to run a standalone MinIO server as a container.
We designed MinIO as Open Source software for the Open Source software community. We encourage the community to remix, redesign, and reshare MinIO under the terms of the AGPLv3 license.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
for more complete documentation.
All usage of MinIO in your application stack requires validation against AGPLv3 obligations, which include but are not limited to the release of modified code to the community from which you have benefited. Any commercial/proprietary usage of the AGPLv3 software, including repackaging or reselling services/features, is done at your own risk.
### Stable
The AGPLv3 provides no obligation by any party to support, maintain, or warranty the original or any modified work.
All support is provided on a best-effort basis through GitHub and our [Slack](https://slack.min.io) channel, and any member of the community is welcome to contribute and assist others in their usage of the software.
Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume:
MinIO [AIStor](https://www.min.io/product/aistor) includes enterprise-grade support and licensing for workloads which require commercial or proprietary usage and production-level SLA/SLO-backed support. For more information, [reach out for a quote](https://min.io/pricing).
```sh
podman run -p 9000:9000 -p 9001:9001 \
quay.io/minio/minio server /data --console-address ":9001"
```
## Source-Only Distribution
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded
object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the
root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
**Important:** The MinIO community edition is now distributed as source code only. We will no longer provide pre-compiled binary releases for the community version.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
### Installing Latest MinIO Community Edition
> NOTE: To deploy MinIO with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container. A combined invocation is sketched below.
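Putting that note together with the standalone command shown above, a container start with persistent storage might look like the following (paths, ports, and image are the example values used in this README):

```sh
podman run -p 9000:9000 -p 9001:9001 \
  -v /mnt/data:/data \
  quay.io/minio/minio server /data --console-address ":9001"
```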
To use MinIO community edition, you have two options:
## macOS
1. **Install from source** using `go install github.com/minio/minio@latest` (recommended)
2. **Build a Docker image** from the provided Dockerfile
Use the following commands to run a standalone MinIO server on macOS.
See the sections below for detailed instructions on each method.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
### Legacy Binary Releases
### Homebrew (recommended)
Historical pre-compiled binary releases remain available for reference but are no longer maintained:
- GitHub Releases: https://github.com/minio/minio/releases
- Direct downloads: https://dl.min.io/server/minio/release/
Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
**These legacy binaries will not receive updates.** We strongly recommend using source builds for access to the latest features, bug fixes, and security updates.
```sh
brew install minio/stable/minio
minio server /data
```
> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
```sh
brew uninstall minio
brew install minio/stable/minio
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
### Binary Download
Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/darwin-amd64/minio
chmod +x minio
./minio server /data
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
## GNU/Linux
Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data.
```sh
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
./minio server /data
```
The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host.
| Architecture | URL |
| -------- | ------ |
| 64-bit Intel/AMD | <https://dl.min.io/server/minio/release/linux-amd64/minio> |
| 64-bit ARM | <https://dl.min.io/server/minio/release/linux-arm64/minio> |
| 64-bit PowerPC LE (ppc64le) | <https://dl.min.io/server/minio/release/linux-ppc64le/minio> |
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Microsoft Windows
To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL:
```sh
https://dl.min.io/server/minio/release/windows-amd64/minio.exe
```
Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``:
```sh
minio.exe server D:\
```
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
## Install from Source
Use the following commands to compile and run a standalone MinIO server from source.
If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)
Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable)
```sh
go install github.com/minio/minio@latest
```
You can alternatively run `go build` and use the `GOOS` and `GOARCH` environment variables to control the OS and architecture target.
For example:
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
```
env GOOS=linux GOARCH=arm64 go build
```
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
Start MinIO by running `minio server PATH` where `PATH` is any empty folder on your local filesystem.
> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
The MinIO deployment starts using default root credentials `minioadmin:minioadmin`.
You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server.
Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials.
You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool:
## Deployment Recommendations
### Allow port access for Firewalls
By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port.
### ufw
For hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000
```sh
mc alias set local http://localhost:9000 minioadmin minioadmin
mc admin info local
ufw allow 9000
```
See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool.
For application developers, see <https://docs.min.io/enterprise/aistor-object-store/developers/sdk/> to view MinIO SDKs for supported languages.
> [!NOTE]
> Production environments using compiled-from-source MinIO binaries do so at their own risk.
> The AGPLv3 license provides no warranties or liabilities for any such usage.
## Build Docker Image
You can use the `docker build .` command to build a Docker image on your local host machine.
You must first [build MinIO](#install-from-source) and ensure the `minio` binary exists in the project root.
The following command builds the Docker image using the default `Dockerfile` in the root project directory with the repository and image tag `myminio:minio`
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
docker build -t myminio:minio .
ufw allow 9000:9010/tcp
```
Use `docker image ls` to confirm the image exists in your local repository.
You can run the server using standard Docker invocation:
### firewall-cmd
For hosts with firewall-cmd enabled (CentOS), you can use `firewall-cmd` command to allow traffic to specific ports. Use below commands to allow access to port 9000
```sh
docker run -p 9000:9000 -p 9001:9001 myminio:minio server /tmp/minio --console-address :9001
firewall-cmd --get-active-zones
```
Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation.
You can modify the `Dockerfile` and `dockerscripts/docker-entrypoint.sh` as-needed to reflect your specific image requirements.
This command gets the active zone(s). Now, apply port rules to the relevant zones returned above. For example if the zone is `public`, use
See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a Container image.
```sh
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```
## Install using Helm Charts
Note that `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.
There are two paths for installing MinIO onto Kubernetes infrastructure:
```sh
firewall-cmd --reload
```
- Use the [MinIO Operator](https://github.com/minio/operator)
- Use the community-maintained [Helm charts](https://github.com/minio/minio/tree/master/helm/minio)
### iptables
See the [MinIO Documentation](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) for guidance on deploying using the Operator.
The Community Helm chart has instructions in the folder-level README.
For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow
access to port 9000
```sh
iptables -A INPUT -p tcp --dport 9000 -j ACCEPT
service iptables restart
```
Below command enables all incoming traffic to ports ranging from 9000 to 9010.
```sh
iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```
## Test MinIO Connectivity
### Test using MinIO Console
MinIO Server comes with an embedded web based object browser.
Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.
> [!NOTE]
> MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.
> NOTE: MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.
### Test using MinIO Client `mc`
### Things to consider
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.
MinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work.
The following commands set a local alias, validate the server information, create a bucket, copy data to that bucket, and list the contents of the bucket.
For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically.
For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
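As a minimal illustration of that scenario (hostnames reused from the example above, not a prescriptive configuration), the redirect target is supplied through the environment before starting the server:

```sh
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.example.net"
minio server /data --console-address ":9001"
```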
| Dashboard | Creating a bucket |
| ------------- | ------------- |
| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |
## Test using MinIO Client `mc`
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
## Upgrading MinIO
Upgrades require zero downtime in MinIO: all upgrades are non-disruptive and all transactions on MinIO are atomic, so upgrading all the servers simultaneously is the recommended way to upgrade MinIO.
> NOTE: requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
```sh
mc alias set local http://localhost:9000 minioadmin minioadmin
mc admin info
mc mb data
mc cp ~/Downloads/mydata data/
mc ls data/
mc admin update <minio alias, e.g., myminio>
```
Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) for further instructions.
- For deployments without external internet access (e.g. airgapped environments), download the binary from <https://dl.min.io>, replace the existing MinIO binary (for example `/opt/bin/minio`), apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/`.
- For installations using the Systemd MinIO service, upgrade via RPM/DEB packages **in parallel** on all servers, or replace the binary (for example `/opt/bin/minio`) on all nodes, apply executable permissions with `chmod +x /opt/bin/minio`, and then run `mc admin service restart alias/`.
### Upgrade Checklist
- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk.
- Read the release notes for MinIO *before* performing any upgrade; there is no forced requirement to upgrade to the latest release upon every release. Some releases may not be relevant to your setup, so avoid upgrading production environments unnecessarily.
- If you plan to use `mc admin update`, MinIO process must have write access to the parent directory where the binary is present on the host system.
- `mc admin update` is not supported and should be avoided in kubernetes/container environments, please upgrade containers by upgrading relevant container images.
- **We do not recommend upgrading one MinIO server at a time; the product is designed to support parallel upgrades, so please follow our recommended guidelines.**
## Explore Further
- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)
- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/enterprise/aistor-object-store/developers/sdk/go/)
- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
## Contribute to MinIO Project
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) for guidance on making new contributions to the repository.
Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
## License

View File

@@ -74,11 +74,11 @@ check_minimum_version() {
assert_is_supported_arch() {
case "${ARCH}" in
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
exit 1
;;
esac

View File

@@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
}
function _build() {

View File

@@ -193,27 +193,27 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
if err != nil {
return result, err
return
}
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
return result, err
return
}
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
if err != nil {
return result, err
return
}
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
if err != nil {
return result, err
return
}
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
err = badConfigErr{Err: verr}
return result, err
return
}
// Check if subnet proxy being set and if so set the same value to proxy of subnet
@@ -222,12 +222,12 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
return result, err
return
}
// Write the config input KV to history.
err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
return result, err
return
}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}

View File

@@ -445,10 +445,8 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
Name: svc.Name,
Description: svc.Description,
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
})
}
for _, sts := range stsKeys {
@@ -627,10 +625,8 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
}
for _, svc := range serviceAccounts {
accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &svc.Expiration,
Name: svc.Name,
Description: svc.Description,
AccessKey: svc.AccessKey,
Expiration: &svc.Expiration,
})
}
// if only service accounts, skip if user has no service accounts

View File

@@ -173,8 +173,6 @@ func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *htt
if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
continue // skip if no roleArn and no policy claim
}
// claim-based provider is in the roleArnMap under dummy ARN
arn = dummyRoleARN
}
matchingCfgName, ok := roleArnMap[arn]
if !ok {

View File

@@ -61,7 +61,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
return
}
if z.IsRebalanceStarted(ctx) {
if z.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
@@ -277,7 +277,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
return
}
if pools.IsRebalanceStarted(ctx) {
if pools.IsRebalanceStarted() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
return
}
@@ -380,7 +380,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return proxy
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == host && !proxyEp.IsLocal {
@@ -389,5 +389,5 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
}
}
}
return proxy
return
}

View File

@@ -70,7 +70,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return opts
return
}
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
@@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}
}
func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := io.ReadAll(body)
if err != nil {
return SRError{
@@ -422,7 +422,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return opts
return
}
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
@@ -484,7 +484,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
opts.Metrics = q.Get("metrics") == "true"
return opts
return
}
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove

View File

@@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
// Create a policy policy
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
]
}
]
}`, bucket)
}`, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
userCount := 50
accessKeys := make([]string, userCount)
secretKeys := make([]string, userCount)
for i := range userCount {
for i := 0; i < userCount; i++ {
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
@@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
}
g := errgroup.Group{}
for i := range userCount {
for i := 0; i < userCount; i++ {
g.Go(func(i int) func() error {
return func() error {
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")

View File

@@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"os"
"slices"
@@ -158,7 +157,9 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
maps.Copy(allCredentials, ldapUsers)
for k, v := range ldapUsers {
allCredentials[k] = v
}
// Marshal the response
data, err := json.Marshal(allCredentials)
@@ -1826,18 +1827,16 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
} else {
entityName = foundGroupDN.NormDN
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
} else {
entityName = foundUserDN.NormDN
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@@ -2948,7 +2947,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
name: createReq.Name,
description: description,
expiration: createReq.Expiration,
claims: make(map[string]any),
claims: make(map[string]interface{}),
}
condValues := getConditionValues(r, "", cred)

View File

@@ -208,8 +208,6 @@ func TestIAMInternalIDPServerSuite(t *testing.T) {
suite.TestGroupAddRemove(c)
suite.TestServiceAccountOpsByAdmin(c)
suite.TestServiceAccountPrivilegeEscalationBug(c)
suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, true)
suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, false)
suite.TestServiceAccountOpsByUser(c)
suite.TestServiceAccountDurationSecondsCondition(c)
suite.TestAddServiceAccountPerms(c)
@@ -334,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
// 2.2 create and associate policy to user
policy := "mypolicy-test-user-update"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -357,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -564,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
// 1. Create a policy
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -587,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -682,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
c.Fatalf("bucket creat error: %v", err)
}
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -705,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -741,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
}
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -764,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -913,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -936,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -997,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -1028,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1095,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -1118,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
]
}
]
}`, bucket, bucket)
}`, bucket, bucket))
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@@ -1251,108 +1249,6 @@ func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug(c *check) {
}
}
func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c *check, forRoot bool) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
for i := range 3 {
err := s.client.MakeBucket(ctx, fmt.Sprintf("bucket%d", i+1), minio.MakeBucketOptions{})
if err != nil {
c.Fatalf("bucket create error: %v", err)
}
defer func(i int) {
_ = s.client.RemoveBucket(ctx, fmt.Sprintf("bucket%d", i+1))
}(i)
}
allow2BucketsPolicyBytes := []byte(`{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ListBucket1AndBucket2",
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": ["arn:aws:s3:::bucket1", "arn:aws:s3:::bucket2"]
},
{
"Sid": "ReadWriteBucket1AndBucket2Objects",
"Effect": "Allow",
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:GetObjectVersion",
"s3:PutObject"
],
"Resource": ["arn:aws:s3:::bucket1/*", "arn:aws:s3:::bucket2/*"]
}
]
}`)
if forRoot {
// Create a service account for the root user.
_, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: allow2BucketsPolicyBytes,
AccessKey: "restricted",
SecretKey: "restricted123",
})
if err != nil {
c.Fatalf("could not create service account")
}
defer func() {
_ = s.adm.DeleteServiceAccount(ctx, "restricted")
}()
} else {
// Create a regular user and attach consoleAdmin policy
err := s.adm.AddUser(ctx, "foobar", "foobar123")
if err != nil {
c.Fatalf("could not create user")
}
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{"consoleAdmin"},
User: "foobar",
})
if err != nil {
c.Fatalf("could not attach policy")
}
// Create a service account for the regular user.
_, err = s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: allow2BucketsPolicyBytes,
TargetUser: "foobar",
AccessKey: "restricted",
SecretKey: "restricted123",
})
if err != nil {
c.Fatalf("could not create service account: %v", err)
}
defer func() {
_ = s.adm.DeleteServiceAccount(ctx, "restricted")
_ = s.adm.RemoveUser(ctx, "foobar")
}()
}
restrictedClient := s.getUserClient(c, "restricted", "restricted123", "")
buckets, err := restrictedClient.ListBuckets(ctx)
if err != nil {
c.Fatalf("err fetching buckets %s", err)
}
if len(buckets) != 2 || buckets[0].Name != "bucket1" || buckets[1].Name != "bucket2" {
c.Fatalf("restricted service account should only have access to bucket1 and bucket2")
}
// Try to escalate privileges
restrictedAdmClient := s.getAdminClient(c, "restricted", "restricted123", "")
_, err = restrictedAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
AccessKey: "newroot",
SecretKey: "newroot123",
})
if err == nil {
c.Fatalf("restricted service account was able to create service account bypassing sub-policy!")
}
}
func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
@@ -1471,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -1485,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
]
}
]
}`, bucket)
}`, bucket))
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@@ -1662,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
c.Helper()
versions := []string{}
for range 5 {
for i := 0; i < 5; i++ {
ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
@@ -1731,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := fmt.Appendf(nil, `{
policyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -1745,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
]
}
]
}`, bucket)
}`, bucket))
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@@ -1759,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
c.mustNotListObjects(ctx, svcClient, bucket)
// This policy allows listing objects.
newPolicyBytes := fmt.Appendf(nil, `{
newPolicyBytes := []byte(fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@@ -1772,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
]
}
]
}`, bucket)
}`, bucket))
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewPolicy: newPolicyBytes,
})

View File

@@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
var args dsync.LockArgs
var lockers []dsync.NetLocker
for path := range strings.SplitSeq(vars["paths"], ",") {
for _, path := range strings.Split(vars["paths"], ",") {
if path == "" {
continue
}
@@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
mode os.FileMode
modTime time.Time
isDir bool
sys any
sys interface{}
}
func (f dummyFileInfo) Name() string { return f.name }
@@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() any { return f.sys }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------
@@ -1243,17 +1243,17 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.objPrefix != "" {
// Bucket is required if object-prefix is given
err = ErrHealMissingBucket
return hip, err
return
}
} else if isReservedOrInvalidBucket(hip.bucket, false) {
err = ErrInvalidBucketName
return hip, err
return
}
// empty prefix is valid.
if !IsValidObjectPrefix(hip.objPrefix) {
err = ErrInvalidObjectName
return hip, err
return
}
if len(qParams[mgmtClientToken]) > 0 {
@@ -1275,7 +1275,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if (hip.forceStart && hip.forceStop) ||
(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
err = ErrInvalidRequest
return hip, err
return
}
// ignore body if clientToken is provided
@@ -1284,12 +1284,12 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if jerr != nil {
adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse
return hip, err
return
}
}
err = ErrNone
return hip, err
return
}
// HealHandler - POST /minio/admin/v3/heal/
@@ -2022,7 +2022,7 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err
opts.OS = true
// Older mc - cannot deal with more types...
}
return opts, err
return
}
// TraceHandler - POST /minio/admin/v3/trace

View File

@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
func TestTopLockEntries(t *testing.T) {
locksHeld := make(map[string][]lockRequesterInfo)
var owners []string
for i := range 4 {
for i := 0; i < 4; i++ {
owners = append(owners, fmt.Sprintf("node-%d", i))
}
@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
// request UID, but 10 different resource names associated with it.
var lris []lockRequesterInfo
uuid := mustGetUUID()
for i := range 10 {
for i := 0; i < 10; i++ {
resource := fmt.Sprintf("bucket/delete-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
}
// Add a few concurrent read locks to the mix
for i := range 50 {
for i := 0; i < 50; i++ {
resource := fmt.Sprintf("bucket/get-object-%d", i)
lri := lockRequesterInfo{
Name: resource,

View File

@@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"net/http"
"sort"
"sync"
@@ -521,7 +520,9 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
maps.Copy(retMap, h.scannedItemsMap)
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
return retMap
}
@@ -533,7 +534,9 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
maps.Copy(retMap, h.healedItemsMap)
for k, v := range h.healedItemsMap {
retMap[k] = v
}
return retMap
}
@@ -546,7 +549,9 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
maps.Copy(retMap, h.healFailedItemsMap)
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
return retMap
}

View File

@@ -296,7 +296,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag))
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag))
// Identity Provider configuration APIs
// IDentity Provider configuration APIs
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.UpdateIdentityProviderCfg))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(adminMiddleware(adminAPI.ListIdentityProviderCfg))

View File

@@ -23,7 +23,6 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"mime"
"net/http"
"strconv"
"strings"
@@ -65,7 +64,7 @@ func setCommonHeaders(w http.ResponseWriter) {
}
// Encodes the response headers into XML format.
func encodeResponse(response any) []byte {
func encodeResponse(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@@ -83,7 +82,7 @@ func encodeResponse(response any) []byte {
// Do not use this function for anything other than ListObjects()
// variants, please open a github discussion if you wish to use
// this in other places.
func encodeResponseList(response any) []byte {
func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@@ -94,7 +93,7 @@ func encodeResponseList(response any) []byte {
}
// Encodes the response headers into JSON format.
func encodeResponseJSON(response any) []byte {
func encodeResponseJSON(response interface{}) []byte {
var bytesBuffer bytes.Buffer
e := json.NewEncoder(&bytesBuffer)
e.Encode(response)
@@ -169,32 +168,6 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue
}
// check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
// For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
// funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
// example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
// of =?UTF-8?B?w7Y=?=. This mirrors errors like the ä½ in another string.
//
// S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
// (quoted-printable) for mostly ASCII strings. Long strings are split at word
// boundaries to fit RFC 2047's 75-character limit, ensuring HTTP parser
// compatibility.
//
// However, this splitting increases header size and can introduce errors, unlike Go's
// mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
// avoiding S3's heuristic-driven issues.
//
// For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
// report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
// compatibility.
if needsMimeEncoding(v) {
// see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
v = mime.BEncoding.Encode("UTF-8", v)
} else {
v = mime.QEncoding.Encode("UTF-8", v)
}
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
@@ -256,14 +229,3 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
return nil
}
// needsEncoding reports whether s contains any bytes that need to be encoded.
// see mime.needsEncoding
func needsMimeEncoding(s string) bool {
for _, b := range s {
if (b < ' ' || b > '~') && b != '\t' {
return true
}
}
return false
}
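The removed block above word-encodes non-ASCII user metadata per RFC 2047 before writing it into a response header, choosing B-encoding when the value contains Q-encoding specials and Q-encoding otherwise. A self-contained sketch of that encode/decode round trip with the standard mime package (the sample value is illustrative, not from this patch):

package main

import (
	"fmt"
	"mime"
	"strings"
)

func main() {
	v := "öha, das sollte eigentlich funktionieren"

	// Mirror the removed logic: B-encoding when the value contains
	// characters that are special under RFC 2047 Q-encoding, Q-encoding otherwise.
	var encoded string
	if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
		encoded = mime.BEncoding.Encode("UTF-8", v)
	} else {
		encoded = mime.QEncoding.Encode("UTF-8", v)
	}
	fmt.Println(encoded)

	// Clients can restore the original value with mime.WordDecoder,
	// as the comment above suggests.
	dec := new(mime.WordDecoder)
	decoded, err := dec.DecodeHeader(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)
}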

View File

@@ -31,7 +31,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return prefix, marker, delimiter, maxkeys, encodingType, errCode
return
}
} else {
maxkeys = maxObjectList
@@ -41,7 +41,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return prefix, marker, delimiter, maxkeys, encodingType, errCode
return
}
func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
@@ -51,7 +51,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
return
}
} else {
maxkeys = maxObjectList
@@ -62,7 +62,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
return
}
// Parse bucket url queries for ListObjects V2.
@@ -73,7 +73,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
return
}
}
@@ -81,7 +81,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
return
}
} else {
maxkeys = maxObjectList
@@ -97,11 +97,11 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil {
errCode = ErrIncorrectContinuationToken
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
return
}
token = string(decodedToken)
}
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
return
}
// Parse bucket url queries for ?uploads
@@ -112,7 +112,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
return
}
} else {
maxUploads = maxUploadsList
@@ -123,7 +123,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
return
}
// Parse object url queries
@@ -134,7 +134,7 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return uploadID, partNumberMarker, maxParts, encodingType, errCode
return
}
} else {
maxParts = maxPartsList
@@ -143,11 +143,11 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return uploadID, partNumberMarker, maxParts, encodingType, errCode
return
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return uploadID, partNumberMarker, maxParts, encodingType, errCode
return
}

View File

@@ -889,12 +889,6 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
}
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
// Don't write a response if one has already been written.
// Fixes https://github.com/minio/minio/issues/21633
if headersAlreadyWritten(w) {
return
}
if statusCode == 0 {
statusCode = 200
}
@@ -1021,45 +1015,3 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
type unwrapper interface {
Unwrap() http.ResponseWriter
}
// headersAlreadyWritten returns true if the headers have already been written
// to this response writer. It will unwrap the ResponseWriter if possible to try
// and find a trackingResponseWriter.
func headersAlreadyWritten(w http.ResponseWriter) bool {
for {
if trw, ok := w.(*trackingResponseWriter); ok {
return trw.headerWritten
} else if uw, ok := w.(unwrapper); ok {
w = uw.Unwrap()
} else {
return false
}
}
}
// trackingResponseWriter wraps a ResponseWriter and notes when WriteHeader has
// been called. This allows high level request handlers to check if something
// has already sent the header.
type trackingResponseWriter struct {
http.ResponseWriter
headerWritten bool
}
func (w *trackingResponseWriter) WriteHeader(statusCode int) {
if !w.headerWritten {
w.headerWritten = true
w.ResponseWriter.WriteHeader(statusCode)
}
}
func (w *trackingResponseWriter) Write(b []byte) (int, error) {
return w.ResponseWriter.Write(b)
}
func (w *trackingResponseWriter) Unwrap() http.ResponseWriter {
return w.ResponseWriter
}

View File

@@ -18,12 +18,8 @@
package cmd
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/klauspost/compress/gzhttp"
)
// Tests object location.
@@ -104,6 +100,7 @@ func TestObjectLocation(t *testing.T) {
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
@@ -126,89 +123,3 @@ func TestGetURLScheme(t *testing.T) {
t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
}
}
func TestTrackingResponseWriter(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
trw.WriteHeader(123)
if !trw.headerWritten {
t.Fatal("headerWritten was not set by WriteHeader call")
}
_, err := trw.Write([]byte("hello"))
if err != nil {
t.Fatalf("Write unexpectedly failed: %v", err)
}
// Check that WriteHeader and Write were called on the underlying response writer
resp := rw.Result()
if resp.StatusCode != 123 {
t.Fatalf("unexpected status: %v", resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("reading response body failed: %v", err)
}
if string(body) != "hello" {
t.Fatalf("response body incorrect: %v", string(body))
}
// Check that Unwrap works
if trw.Unwrap() != rw {
t.Fatalf("Unwrap returned wrong result: %v", trw.Unwrap())
}
}
func TestHeadersAlreadyWritten(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
if headersAlreadyWritten(trw) {
t.Fatal("headers have not been written yet")
}
trw.WriteHeader(123)
if !headersAlreadyWritten(trw) {
t.Fatal("headers were written")
}
}
func TestHeadersAlreadyWrittenWrapped(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
wrap1 := &gzhttp.NoGzipResponseWriter{ResponseWriter: trw}
wrap2 := &gzhttp.NoGzipResponseWriter{ResponseWriter: wrap1}
if headersAlreadyWritten(wrap2) {
t.Fatal("headers have not been written yet")
}
wrap2.WriteHeader(123)
if !headersAlreadyWritten(wrap2) {
t.Fatal("headers were written")
}
}
func TestWriteResponseHeadersNotWritten(t *testing.T) {
rw := httptest.NewRecorder()
trw := &trackingResponseWriter{ResponseWriter: rw}
writeResponse(trw, 299, []byte("hello"), "application/foo")
resp := rw.Result()
if resp.StatusCode != 299 {
t.Fatal("response wasn't written")
}
}
func TestWriteResponseHeadersWritten(t *testing.T) {
rw := httptest.NewRecorder()
rw.Code = -1
trw := &trackingResponseWriter{ResponseWriter: rw, headerWritten: true}
writeResponse(trw, 200, []byte("hello"), "application/foo")
if rw.Code != -1 {
t.Fatalf("response was written when it shouldn't have been (Code=%v)", rw.Code)
}
}

View File

@@ -218,8 +218,6 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
handlerName := getHandlerName(f, "objectAPIHandlers")
var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w = &trackingResponseWriter{ResponseWriter: w}
// Wrap the actual handler with the appropriate tracing middleware.
var tracedHandler http.HandlerFunc
if handlerFlags.has(traceHdrsS3HFlag) {
@@ -389,11 +387,6 @@ func registerAPIRouter(router *mux.Router) {
HeadersRegexp(xhttp.AmzSnowballExtract, "true").
HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))
// AppendObject to be rejected
router.Methods(http.MethodPut).Path("/{object:.+}").
HeadersRegexp(xhttp.AmzWriteOffsetBytes, "").
HandlerFunc(s3APIMiddleware(errorResponseHandler))
// PutObject
router.Methods(http.MethodPut).Path("/{object:.+}").
HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))

View File

@@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
// - Force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
for i := range len(s) {
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
@@ -70,7 +70,7 @@ func s3URLEncode(s string) string {
if hexCount == 0 {
copy(t, s)
for i := range len(s) {
for i := 0; i < len(s); i++ {
if s[i] == ' ' {
t[i] = '+'
}
@@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
}
j := 0
for i := range len(s) {
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ':
t[j] = '+'

View File

@@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]any {
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}
@@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]any, error) {
func getClaimsFromToken(token string) (map[string]interface{}, error) {
jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
if err != nil {
return nil, err
@@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]any, error) {
}
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.

View File

@@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
for range globalBackgroundHealRoutine.workers {
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}
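The one-line change above trades Go 1.22's range-over-integer loop for the classic counted form. A minimal sketch of the two equivalent loops (the worker count is illustrative, not from this patch):

package main

import "fmt"

func main() {
	n := 4

	// Go 1.22+ allows ranging directly over an integer...
	for i := range n {
		fmt.Println("worker", i)
	}

	// ...which is equivalent to the classic counted loop the diff restores.
	for i := 0; i < n; i++ {
		fmt.Println("worker", i)
	}
}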

View File

@@ -24,7 +24,6 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
"sync"
@@ -270,7 +269,12 @@ func (h *healingTracker) delete(ctx context.Context) error {
func (h *healingTracker) isHealed(bucket string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
return slices.Contains(h.HealedBuckets, bucket)
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
}
// resume will reset progress to the numbers at the start of the bucket.
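isHealed above is rewritten from slices.Contains (Go 1.21+) to an explicit scan; both forms report membership the same way. A standalone sketch (the bucket names are illustrative, not from this patch):

package main

import (
	"fmt"
	"slices"
)

func main() {
	healed := []string{"photos", "logs", "backups"}

	// One-liner from the slices package...
	fmt.Println(slices.Contains(healed, "logs")) // true

	// ...and the explicit loop the diff switches to.
	found := false
	for _, v := range healed {
		if v == "logs" {
			found = true
			break
		}
	}
	fmt.Println(found) // true
}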

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@@ -249,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
pInfo PartInfo
)
for i := range partsCount {
for i := 0; i < partsCount; i++ {
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,
@@ -575,7 +574,9 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
}
maps.Copy(oi.UserDefined, objInfo.UserMetadata)
for k, v := range objInfo.UserMetadata {
oi.UserDefined[k] = v
}
return oi
}
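toObjectInfo above swaps maps.Copy (Go 1.21+) for a manual key/value loop; the two do the same work. A standalone sketch (the metadata keys are illustrative, not from this patch):

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"x-amz-storage-class": "STANDARD"}
	src := map[string]string{"content-type": "text/plain"}

	// maps.Copy copies every key/value pair from src into dst...
	maps.Copy(dst, src)

	// ...exactly like the loop the diff falls back to (a no-op here,
	// shown only for equivalence).
	for k, v := range src {
		dst[k] = v
	}
	fmt.Println(dst)
}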

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
type BatchJobSize int64
// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
var batchExpireSz string
err := unmarshal(&batchExpireSz)
if err != nil {

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -21,7 +21,6 @@ import (
"context"
"encoding/base64"
"fmt"
"maps"
"math/rand"
"net/http"
"runtime"
@@ -111,7 +110,9 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
}
}
e.kmsContext = kms.Context{}
maps.Copy(e.kmsContext, ctx)
for k, v := range ctx {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
@@ -224,7 +225,9 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
// Since we are rotating the keys, make sure to update the metadata.
oi.metadataOnly = true
oi.keyRotation = true
maps.Copy(oi.UserDefined, encMetadata)
for k, v := range encMetadata {
oi.UserDefined[k] = v
}
if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
VersionID: oi.VersionID,
}, ObjectOptions{

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
for i := 0; b.Loop(); i++ {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
for i := 0; b.Loop(); i++ {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
for j := range totalPartsNR {
for j := 0; j < totalPartsNR; j++ {
if j < totalPartsNR-1 {
textPartData = textData[j*partSize : (j+1)*partSize-1]
} else {

View File

@@ -99,7 +99,7 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return alg
}
}
return a
return
}
func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
@@ -59,17 +59,19 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 {
clear(z.MinioEnv)
for key := range z.MinioEnv {
delete(z.MinioEnv, key)
}
}
for zb0003 > 0 {
zb0003--
var za0002 string
var za0003 string
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnv")
return
}
var za0003 string
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnv", za0002)
@@ -238,12 +240,14 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 {
clear(z.MinioEnv)
for key := range z.MinioEnv {
delete(z.MinioEnv, key)
}
}
for zb0003 > 0 {
var za0002 string
var za0003 string
zb0003--
var za0002 string
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinioEnv")

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -154,6 +154,7 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
}, index)
@@ -592,7 +593,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
output[idx] = obj
idx++
}
return output
return
}
// Disable timeouts and cancellation
@@ -1088,14 +1089,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
break
}
// check if have a file
if reader == nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
if keyName, ok := formValues["Key"]; !ok {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
@@ -1386,7 +1379,10 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Set the correct hex md5sum for the fan-out stream.
fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))
concurrentSize := min(runtime.GOMAXPROCS(0), 100)
concurrentSize := 100
if runtime.GOMAXPROCS(0) < concurrentSize {
concurrentSize = runtime.GOMAXPROCS(0)
}
fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
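The fan-out hunk above expands the built-in min (Go 1.21+) into an explicit comparison, and the replication resync code further down does the same for max. A small sketch of both forms (the constants are illustrative, not from this patch):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Built-in min...
	concurrent := min(runtime.GOMAXPROCS(0), 100)

	// ...and the explicit comparison the patch falls back to.
	expanded := 100
	if runtime.GOMAXPROCS(0) < expanded {
		expanded = runtime.GOMAXPROCS(0)
	}
	fmt.Println(concurrent == expanded) // true
}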
@@ -1657,11 +1653,9 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.HeadBucketAction, bucket, ""); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
getBucketInfo := objectAPI.GetBucketInfo

View File

@@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
sha256sum := ""
var objectNames []string
for i := range 10 {
for i := 0; i < 10; i++ {
contentBytes := []byte("hello")
objectName := "test-object-" + strconv.Itoa(i)
if i == 0 {
@@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// The following block will create a bucket policy with delete object to 'public/*'. This is
// to test a mixed response of a successful & failure while deleting objects in a single request
policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
credentials.AccessKey, credentials.SecretKey, nil)
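The policy test above rewrites fmt.Appendf (Go 1.19+) into []byte(fmt.Sprintf(...)); the result is byte-for-byte identical, Appendf simply skips the intermediate string. A standalone sketch (the ARN template is illustrative, not from this patch):

package main

import "fmt"

func main() {
	bucket := "testbucket"

	a := fmt.Appendf(nil, "arn:aws:s3:::%s/public/*", bucket)
	b := []byte(fmt.Sprintf("arn:aws:s3:::%s/public/*", bucket))

	fmt.Println(string(a) == string(b)) // true
}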

View File

@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
@@ -960,7 +959,9 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
UserDefined: meta,
}
}
maps.Copy(meta, objInfo.UserDefined)
for k, v := range objInfo.UserDefined {
meta[k] = v
}
if len(objInfo.UserTags) != 0 {
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
}

View File

@@ -248,19 +248,19 @@ func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Req
if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
}
return subToken, proxied, success
return
}
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
if len(globalProxyEndpoints) == 0 {
return proxied, success
return
}
if index < 0 || index >= len(globalProxyEndpoints) {
return proxied, success
return
}
ep := globalProxyEndpoints[index]
if ep.IsLocal {
return proxied, success
return
}
return true, proxyRequest(ctx, w, r, ep, returnErr)
}

View File

@@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
return meta, reloaded, nil
}
val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
@@ -511,6 +511,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
g := errgroup.WithNErrs(len(buckets))
bucketMetas := make([]BucketMetadata, len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
// Sleep and stagger to avoid blocked CPU and thundering
// herd upon start up sequence.

View File

@@ -38,6 +38,7 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
@@ -161,7 +162,7 @@ func (b BucketMetadata) lastUpdate() (t time.Time) {
t = b.BucketTargetsConfigMetaUpdatedAt
}
return t
return
}
// Versioning returns true if versioning is enabled
@@ -542,26 +543,26 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
if GlobalKMS == nil {
output = input
return output, metabytes, err
return
}
metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil {
return output, metabytes, err
return
}
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
crypto.S3.CreateMetadata(metadata, key, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
if err != nil {
return output, metabytes, err
}
metabytes, err = json.Marshal(metadata)
if err != nil {
return output, metabytes, err
return
}
return outbuf.Bytes(), metabytes, nil
}
@@ -589,6 +590,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
}
outbuf := bytes.NewBuffer(nil)
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
return outbuf.Bytes(), err
}
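encryptBucketMetadata and decryptBucketMetadata above seal the bucket metadata with a DARE stream via github.com/minio/sio; one side of the hunk additionally pins the FIPS-approved cipher suites via fips.DARECiphers(). A much-simplified round-trip sketch, assuming a 32-byte key is already in hand (the real code derives it from the KMS data key and stores the sealed key in the metadata map, as shown above):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"github.com/minio/sio"
)

func main() {
	// Illustrative only: a random key stands in for the object key
	// derived from the KMS data key in the hunk above.
	key := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		panic(err)
	}

	plaintext := []byte(`{"some":"bucket metadata"}`)

	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{Key: key, MinVersion: sio.Version20}); err != nil {
		panic(err)
	}

	var opened bytes.Buffer
	if _, err := sio.Decrypt(&opened, &sealed, sio.Config{Key: key, MinVersion: sio.Version20}); err != nil {
		panic(err)
	}
	fmt.Println(opened.String())
}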

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -297,9 +297,6 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
if legalHoldPermErr != ErrNone {
return mode, retainDate, legalHold, legalHoldPermErr
}
}
if retentionRequested {

View File

@@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
var wg sync.WaitGroup
var mu sync.Mutex
wg.Add(n)
for range n {
for i := 0; i < n; i++ {
go func() {
defer wg.Done()
// Sync start.
@@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Test case - 1.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting StatusBadRequest (400).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: maxBucketPolicySize + 1,
accessKey: credentials.AccessKey,
@@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting the HTTP response status to be StatusLengthRequired (411).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: 0,
accessKey: credentials.AccessKey,
@@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// checkBucketPolicyResources should fail.
{
bucketName: bucketName1,
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: "non-existent-bucket",
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: ".invalid-bucket",
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 400 StatusBadRequest.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
accessKey: credentials.AccessKey,

View File

@@ -19,7 +19,6 @@ package cmd
import (
"encoding/json"
"maps"
"net/http"
"net/url"
"strconv"
@@ -188,7 +187,9 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
}
cloneURLValues := make(url.Values, len(r.Form))
maps.Copy(cloneURLValues, r.Form)
for k, v := range r.Form {
cloneURLValues[k] = v
}
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
@@ -223,7 +224,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
// Add groups claim which could be a list. This will ensure that the claim
// `jwt:groups` works.
if grpsVal, ok := claims["groups"]; ok {
if grpsIs, ok := grpsVal.([]any); ok {
if grpsIs, ok := grpsVal.([]interface{}); ok {
grps := []string{}
for _, gI := range grpsIs {
if g, ok := gI.(string); ok {

View File

@@ -92,12 +92,12 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc quota clear alias/bucket' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return quotaCfg, err
return
}
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -21,7 +21,6 @@ import (
"bytes"
"context"
"fmt"
"maps"
"net/http"
"net/url"
"regexp"
@@ -172,13 +171,13 @@ func (ri ReplicateObjectInfo) TargetReplicationStatus(arn string) (status replic
repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return status
return
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return status
return
}
// TargetReplicationStatus - returns replication status of a target
@@ -186,13 +185,13 @@ func (o ObjectInfo) TargetReplicationStatus(arn string) (status replication.Stat
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return status
return
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return status
return
}
type replicateTargetDecision struct {
@@ -310,9 +309,9 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
targetsMap: make(map[string]replicateTargetDecision),
}
if len(s) == 0 {
return r, err
return
}
for p := range strings.SplitSeq(s, ",") {
for _, p := range strings.Split(s, ",") {
if p == "" {
continue
}
@@ -327,7 +326,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
}
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
}
return r, err
return
}
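parseReplicateDecision above trades strings.SplitSeq (a Go 1.24 iterator) for plain strings.Split; both visit the same comma-separated pieces, Split just allocates the whole slice up front. A minimal sketch (the input string is illustrative, not from this patch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "alpha,beta,gamma"

	// Lazy iterator form (Go 1.24+)...
	for p := range strings.SplitSeq(s, ",") {
		fmt.Println(p)
	}

	// ...and the eager slice form the diff switches to.
	for _, p := range strings.Split(s, ",") {
		fmt.Println(p)
	}
}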
// ReplicationState represents internal replication state
@@ -374,7 +373,7 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT
case !rs.ReplicaStatus.Empty():
return rs.ReplicaStatus
default:
return st
return
}
}
@@ -736,8 +735,10 @@ type BucketReplicationResyncStatus struct {
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
m = make(map[string]TargetReplicationResyncStatus)
maps.Copy(m, rs.TargetsMap)
return m
for arn, st := range rs.TargetsMap {
m[arn] = st
}
return
}
func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
@@ -774,7 +775,7 @@ func extractReplicateDiffOpts(q url.Values) (opts madmin.ReplDiffOpts) {
opts.Verbose = q.Get("verbose") == "true"
opts.ARN = q.Get("arn")
opts.Prefix = q.Get("prefix")
return opts
return
}
const (

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/minio/minio/internal/bucket/replication"
"github.com/tinylib/msgp/msgp"
@@ -41,17 +41,19 @@ func (z *BucketReplicationResyncStatus) DecodeMsg(dc *msgp.Reader) (err error) {
if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 {
clear(z.TargetsMap)
for key := range z.TargetsMap {
delete(z.TargetsMap, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 TargetReplicationResyncStatus
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TargetsMap")
return
}
var za0002 TargetReplicationResyncStatus
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "TargetsMap", za0001)
@@ -201,12 +203,14 @@ func (z *BucketReplicationResyncStatus) UnmarshalMsg(bts []byte) (o []byte, err
if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 {
clear(z.TargetsMap)
for key := range z.TargetsMap {
delete(z.TargetsMap, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 TargetReplicationResyncStatus
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TargetsMap")
@@ -284,17 +288,19 @@ func (z *MRFReplicateEntries) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 {
clear(z.Entries)
for key := range z.Entries {
delete(z.Entries, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 MRFReplicateEntry
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
var za0002 MRFReplicateEntry
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
@@ -472,12 +478,14 @@ func (z *MRFReplicateEntries) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 {
clear(z.Entries)
for key := range z.Entries {
delete(z.Entries, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 MRFReplicateEntry
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries")
@@ -864,17 +872,19 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 {
clear(z.Targets)
for key := range z.Targets {
delete(z.Targets, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 replication.StatusType
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Targets")
return
}
var za0002 replication.StatusType
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
@@ -892,17 +902,19 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 {
clear(z.PurgeTargets)
for key := range z.PurgeTargets {
delete(z.PurgeTargets, key)
}
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 VersionPurgeStatusType
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "PurgeTargets")
return
}
var za0004 VersionPurgeStatusType
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
@@ -920,17 +932,19 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
clear(z.ResetStatusesMap)
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 string
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
var za0006 string
za0006, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap", za0005)
@@ -1222,12 +1236,14 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 {
clear(z.Targets)
for key := range z.Targets {
delete(z.Targets, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 replication.StatusType
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Targets")
@@ -1250,12 +1266,14 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 {
clear(z.PurgeTargets)
for key := range z.PurgeTargets {
delete(z.PurgeTargets, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 VersionPurgeStatusType
zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets")
@@ -1278,12 +1296,14 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
clear(z.ResetStatusesMap)
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
}
for zb0004 > 0 {
var za0005 string
var za0006 string
zb0004--
var za0005 string
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@@ -253,31 +252,31 @@ func getMustReplicateOptions(userDefined map[string]string, userTags string, sta
func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) {
// object layer not initialized we return with no decision.
if newObjectLayerFn() == nil {
return dsc
return
}
// Disable server-side replication on object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if !globalBucketVersioningSys.PrefixEnabled(bucket, object) {
return dsc
return
}
replStatus := mopts.ReplicationStatus()
if replStatus == replication.Replica && !mopts.isMetadataReplication() {
return dsc
return
}
if mopts.replicationRequest { // incoming replication request on target cluster
return dsc
return
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return dsc
return
}
if cfg == nil {
return dsc
return
}
opts := replication.ObjectOpts{
@@ -348,16 +347,16 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
replLogOnceIf(ctx, err, bucket)
return dsc
return
}
// If incoming request is a replication request, it does not need to be re-replicated.
if delOpts.ReplicationRequest {
return dsc
return
}
// Skip replication if this object's prefix is excluded from being
// versioned.
if !delOpts.Versioned {
return dsc
return
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
@@ -617,10 +616,10 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType {
rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
return rinfo
return
}
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete {
return rinfo
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
@@ -641,7 +640,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
} else {
rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
}
return rinfo
return
}
// early return if already replicated delete marker for existing object replication/ healing delete markers
if dobj.DeleteMarkerVersionID != "" {
@@ -658,13 +657,13 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
// delete marker already replicated
if dobj.VersionID == "" && rinfo.VersionPurgeStatus.Empty() {
rinfo.ReplicationStatus = replication.Completed
return rinfo
return
}
case isErrObjectNotFound(serr), isErrVersionNotFound(serr):
// version being purged is already not found on target.
if !rinfo.VersionPurgeStatus.Empty() {
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
return rinfo
return
}
case isErrReadQuorum(serr), isErrWriteQuorum(serr):
// destination has some quorum issues, perform removeObject() anyways
@@ -678,7 +677,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if err != nil && !toi.ReplicationReady {
rinfo.ReplicationStatus = replication.Failed
rinfo.Err = err
return rinfo
return
}
}
}
@@ -709,7 +708,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
}
}
return rinfo
return
}
func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
@@ -804,7 +803,9 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
} else {
cs, mp := getCRCMeta(objInfo, 0, nil)
// Set object checksum.
maps.Copy(meta, cs)
for k, v := range cs {
meta[k] = v
}
isMP = mp
if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
// For objects where checksum is full object, it will be the same.
@@ -910,7 +911,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
}
putOpts.ServerSideEncryption = sseEnc
}
return putOpts, isMP, err
return
}
type replicationAction string
@@ -968,7 +969,9 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
t, _ := tags.ParseObjectTags(oi1.UserTags)
oi2Map := make(map[string]string)
maps.Copy(oi2Map, oi2.UserTags)
for k, v := range oi2.UserTags {
oi2Map[k] = v
}
if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
return replicateMetadata
}
@@ -1208,7 +1211,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return rinfo
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@@ -1220,7 +1223,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@@ -1244,7 +1247,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
})
replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
}
return rinfo
return
}
defer gr.Close()
@@ -1268,7 +1271,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
}
@@ -1307,7 +1310,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
var headerSize int
@@ -1344,7 +1347,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
return rinfo
return
}
// replicateAll replicates metadata for specified version of the object to destination bucket
@@ -1380,7 +1383,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@@ -1405,7 +1408,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
})
replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
}
return rinfo
return
}
defer gr.Close()
@@ -1418,7 +1421,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return rinfo
return
}
size, err := objInfo.GetActualSize()
@@ -1431,7 +1434,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
// Set the encrypted size for SSE-C objects
@@ -1494,7 +1497,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rinfo.ReplicationAction = rAction
rinfo.ReplicationStatus = replication.Completed
}
return rinfo
return
}
} else {
// SSEC objects will refuse HeadObject without the decryption key.
@@ -1528,7 +1531,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
}
applyAction:
@@ -1594,7 +1597,7 @@ applyAction:
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return rinfo
return
}
var headerSize int
for k, v := range putOpts.Header() {
@@ -1631,7 +1634,7 @@ applyAction:
}
}
}
return rinfo
return
}
func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) {
@@ -1767,7 +1770,9 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
}
if !copied {
dst = make(map[string]string, len(metadata))
maps.Copy(dst, metadata)
for k, v := range metadata {
dst[k] = v
}
copied = true
}
delete(dst, key)
@@ -2677,7 +2682,7 @@ func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
// Resync returns true if replication reset is requested
func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
if c.Empty() {
return r
return
}
// Now overlay existing object replication choices for target
@@ -2693,7 +2698,7 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc Replic
tgtArns := c.Config.FilterTargetArns(opts)
// indicates no matching target with Existing object replication enabled.
if len(tgtArns) == 0 {
return r
return
}
for _, t := range tgtArns {
opts.TargetArn = t
@@ -2719,7 +2724,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)),
}
if c.remotes == nil {
return r
return
}
for _, tgt := range c.remotes.Targets {
d, ok := dsc.targetsMap[tgt.Arn]
@@ -2731,7 +2736,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
}
r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn])
}
return r
return
}
func targetResetHeader(arn string) string {
@@ -2750,28 +2755,28 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
if !ok { // existing object replication is enabled and object version is unreplicated so far.
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
rd.Replicate = true
return rd
return
}
// For existing object reset - this condition is needed
rd.Replicate = tgtStatus == ""
return rd
return
}
if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress
return rd
return
}
// if already replicated, return true if a new reset was requested.
splits := strings.SplitN(rs, ";", 2)
if len(splits) != 2 {
return rd
return
}
newReset := splits[1] != resetID
if !newReset && tgtStatus == replication.Completed {
// already replicated and no reset requested
return rd
return
}
rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate)
return rd
return
}
const resyncTimeInterval = time.Minute * 1
@@ -2949,7 +2954,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
}()
var wg sync.WaitGroup
for i := range resyncParallelRoutines {
for i := 0; i < resyncParallelRoutines; i++ {
wg.Add(1)
workers[i] = make(chan ReplicateObjectInfo, 100)
i := i
@@ -3058,7 +3063,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
workers[h%uint64(resyncParallelRoutines)] <- roi
}
}
for i := range resyncParallelRoutines {
for i := 0; i < resyncParallelRoutines; i++ {
xioutil.SafeClose(workers[i])
}
wg.Wait()
@@ -3188,9 +3193,11 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
<-ctx.Done()
return
}
duration := max(time.Duration(r.Float64()*float64(time.Minute)),
duration := time.Duration(r.Float64() * float64(time.Minute))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
duration = time.Second
}
time.Sleep(duration)
}
}
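
Editor's note: both sides of the hunk above implement the same behaviour, sleeping for a random fraction of a minute between resync passes while never sleeping less than one second so the loop does not spin. A minimal standalone sketch of the two equivalent forms (names and the one-minute span are illustrative, not taken from the MinIO sources):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // jitterClamped clamps with an explicit branch, matching the longer form in the hunk.
    func jitterClamped(r *rand.Rand, span time.Duration) time.Duration {
        d := time.Duration(r.Float64() * float64(span))
        if d < time.Second {
            // Make sure to sleep at least a second to avoid high CPU ticks.
            d = time.Second
        }
        return d
    }

    // jitterMax expresses the same clamp with the built-in max (Go 1.21+).
    func jitterMax(r *rand.Rand, span time.Duration) time.Duration {
        return max(time.Duration(r.Float64()*float64(span)), time.Second)
    }

    func main() {
        r := rand.New(rand.NewSource(1))
        fmt.Println(jitterClamped(r, time.Minute), jitterMax(r, time.Minute))
    }
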
@@ -3422,12 +3429,12 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi = getHealReplicateObjectInfo(oi, rcfg)
roi.RetryCount = uint32(retryCount)
if !roi.Dsc.ReplicateAny() {
return roi
return
}
// early return if replication already done, otherwise we need to determine if this
// version is an existing object that needs healing.
if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() {
return roi
return
}
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
@@ -3457,14 +3464,14 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi.ReplicationStatus == replication.Failed ||
roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending {
globalReplicationPool.Get().queueReplicaDeleteTask(dv)
return roi
return
}
// if replication status is Complete on DeleteMarker and existing object resync required
if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) {
queueReplicateDeletesWrapper(dv, roi.ExistingObjResync)
return roi
return
}
return roi
return
}
if roi.ExistingObjResync.mustResync() {
roi.OpType = replication.ExistingObjectReplicationType
@@ -3473,13 +3480,13 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
case replication.Pending, replication.Failed:
roi.EventType = ReplicateHeal
globalReplicationPool.Get().queueReplicaTask(roi)
return roi
return
}
if roi.ExistingObjResync.mustResync() {
roi.EventType = ReplicateExisting
globalReplicationPool.Get().queueReplicaTask(roi)
}
return roi
return
}
const (
@@ -3790,13 +3797,14 @@ func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string
meta := make(map[string]string)
cs, isMP = oi.decryptChecksums(partNum, h)
for k, v := range cs {
if k == xhttp.AmzChecksumType {
cksum := hash.NewChecksumString(k, v)
if cksum == nil {
continue
}
cktype := hash.ChecksumStringToType(k)
if cktype.IsSet() {
meta[cktype.Key()] = v
meta[xhttp.AmzChecksumAlgo] = cktype.String()
if cksum.Valid() {
meta[cksum.Type.Key()] = v
meta[xhttp.AmzChecksumType] = cs[xhttp.AmzChecksumType]
meta[xhttp.AmzChecksumAlgo] = cksum.Type.String()
}
}
return meta, isMP

View File

@@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"maps"
"math"
"sync/atomic"
"time"
@@ -38,7 +37,7 @@ type ReplicationLatency struct {
// Merge two replication latency into a new one
func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
return newReplLatency
return
}
// Get upload latency of each object size range
@@ -49,7 +48,7 @@ func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
// Convert nanoseconds to milliseconds
ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond)
}
return ret
return
}
// Update replication upload latency with a new value
@@ -64,7 +63,7 @@ type ReplicationLastMinute struct {
func (rl ReplicationLastMinute) merge(other ReplicationLastMinute) (nl ReplicationLastMinute) {
nl = ReplicationLastMinute{rl.LastMinute.merge(other.LastMinute)}
return nl
return
}
func (rl *ReplicationLastMinute) addsize(n int64) {
@@ -222,7 +221,9 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
}
if s.Failed.ErrCounts == nil {
s.Failed.ErrCounts = make(map[string]int)
maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
for k, v := range st.Failed.ErrCounts {
s.Failed.ErrCounts[k] = v
}
}
c.Stats[arn] = &s
}

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
@@ -617,17 +617,19 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 {
clear(z.Stats)
for key := range z.Stats {
delete(z.Stats, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 *BucketReplicationStat
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Stats")
return
}
var za0002 *BucketReplicationStat
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
@@ -941,12 +943,14 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error)
if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 {
clear(z.Stats)
for key := range z.Stats {
delete(z.Stats, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 *BucketReplicationStat
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Stats")
@@ -1398,17 +1402,19 @@ func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 {
clear(z.Stats)
for key := range z.Stats {
delete(z.Stats, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 BucketStats
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Stats")
return
}
var za0002 BucketStats
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Stats", za0001)
@@ -1520,12 +1526,14 @@ func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 {
clear(z.Stats)
for key := range z.Stats {
delete(z.Stats, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 BucketStats
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Stats")

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -20,7 +20,6 @@ package cmd
import (
"context"
"errors"
"maps"
"net/url"
"sync"
"time"
@@ -237,7 +236,9 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
sys.hMutex.RLock()
defer sys.hMutex.RUnlock()
m := make(map[string]epHealth, len(sys.hc))
maps.Copy(m, sys.hc)
for k, v := range sys.hc {
m[k] = v
}
return m
}
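
Editor's note: the recurring change in these hunks is how one map is copied into another, either with a hand-written loop or with maps.Copy from the Go 1.21 standard library. A small self-contained sketch of the two equivalent forms (the map contents are illustrative):

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        src := map[string]string{"a": "1", "b": "2"}

        // Hand-written copy loop.
        dst1 := make(map[string]string, len(src))
        for k, v := range src {
            dst1[k] = v
        }

        // Equivalent one-liner using the maps package (Go 1.21+).
        dst2 := make(map[string]string, len(src))
        maps.Copy(dst2, src)

        fmt.Println(dst1, dst2)
    }
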
@@ -285,7 +286,7 @@ func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType str
}
}
}
return targets
return
}
// ListBucketTargets - gets list of bucket targets for this bucket.
@@ -668,7 +669,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
// getRemoteARN gets existing ARN for an endpoint or generates a new one.
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget, deplID string) (arn string, exists bool) {
if target == nil {
return arn, exists
return
}
sys.RLock()
defer sys.RUnlock()
@@ -682,7 +683,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
}
}
if !target.Type.IsValid() {
return arn, exists
return
}
return generateARN(target, deplID), false
}

View File

@@ -57,9 +57,11 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
// callhome running on a different node.
// sleep for some time and try again.
duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
duration = time.Second
}
time.Sleep(duration)
}
}()

View File

@@ -105,7 +105,7 @@ func init() {
gob.Register(madmin.TimeInfo{})
gob.Register(madmin.XFSErrorConfigs{})
gob.Register(map[string]string{})
gob.Register(map[string]any{})
gob.Register(map[string]interface{}{})
// All minio-go and madmin-go API operations shall be performed only once,
// another way to look at this is we are turning off retries.
@@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
if !serverDebugLog {
// Disable console logging if server debug log is not enabled
noLog := func(string, ...any) {}
noLog := func(string, ...interface{}) {}
consoleapi.LogInfo = noLog
consoleapi.LogError = noLog
@@ -761,7 +761,7 @@ func serverHandleEnvVars() {
domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
if _, ok := dns2.IsDomainName(domainName); !ok {
logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
"Invalid MINIO_DOMAIN value in environment variable")
@@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
func (a bgCtx) Value(key any) any {
func (a bgCtx) Value(key interface{}) interface{} {
return a.parent.Value(key)
}

View File

@@ -43,6 +43,7 @@ func Test_readFromSecret(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {
@@ -154,6 +155,7 @@ MINIO_ROOT_PASSWORD=minio123`,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {

View File

@@ -21,7 +21,6 @@ import (
"context"
"errors"
"fmt"
"maps"
"strings"
"sync"
@@ -79,8 +78,12 @@ func initHelp() {
config.BatchSubSys: batch.DefaultKVS,
config.BrowserSubSys: browser.DefaultKVS,
}
maps.Copy(kvs, notify.DefaultNotificationKVS)
maps.Copy(kvs, lambda.DefaultLambdaKVS)
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
}
for k, v := range lambda.DefaultLambdaKVS {
kvs[k] = v
}
if globalIsErasure {
kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
kvs[config.HealSubSys] = heal.DefaultKVS
@@ -352,9 +355,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
}
case config.IdentityOpenIDSubSys:
if _, err := openid.LookupConfig(s,
xhttp.WithUserAgent(NewHTTPTransport(), func() string {
return getUserAgent(getMinioMode())
}), xhttp.DrainBody, globalSite.Region()); err != nil {
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
return err
}
case config.IdentityLDAPSubSys:

View File

@@ -38,12 +38,12 @@ import (
)
// Save config file to corresponding backend
func Save(configFile string, data any) error {
func Save(configFile string, data interface{}) error {
return quick.SaveConfig(data, configFile, globalEtcdClient)
}
// Load config from backend
func Load(configFile string, data any) (quick.Config, error) {
func Load(configFile string, data interface{}) (quick.Config, error) {
return quick.LoadConfig(configFile, globalEtcdClient, data)
}

View File

@@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
return saveConfig(ctx, objAPI, historyFile, kv)
}
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
data, err := json.Marshal(cfg)
if err != nil {
return err

View File

@@ -28,7 +28,7 @@ import (
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/logger/target/console"
types "github.com/minio/minio/internal/logger/target/loggertypes"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/pubsub"
xnet "github.com/minio/pkg/v3/net"
)
@@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
lastN = make([]log.Info, last)
sys.RLock()
sys.logBuf.Do(func(p any) {
sys.logBuf.Do(func(p interface{}) {
if p != nil {
lg, ok := p.(log.Info)
if ok && lg.SendLog(node, logKind) {
@@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
sys.RUnlock()
// send last n console log messages in order filtered by node
if cnt > 0 {
for i := range last {
for i := 0; i < last; i++ {
entry := lastN[(cnt+i)%last]
if (entry == log.Info{}) {
continue
@@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
// Content returns the console stdout log
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
sys.RLock()
sys.logBuf.Do(func(p any) {
sys.logBuf.Do(func(p interface{}) {
if p != nil {
lg, ok := p.(log.Info)
if ok {
@@ -167,7 +167,7 @@ func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
})
sys.RUnlock()
return logs
return
}
// Cancel - cancels the target
@@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
var lg log.Info
switch e := entry.(type) {
case log.Entry:

View File

@@ -106,14 +106,16 @@ func (p *scannerMetrics) log(s scannerMetric, paths ...string) func(custom map[s
// time n scanner actions.
// Use for s < scannerMetricLastRealtime
func (p *scannerMetrics) timeN(s scannerMetric) func(n int) {
func (p *scannerMetrics) timeN(s scannerMetric) func(n int) func() {
startTime := time.Now()
return func(n int) {
duration := time.Since(startTime)
return func(n int) func() {
return func() {
duration := time.Since(startTime)
atomic.AddUint64(&p.operations[s], uint64(n))
if s < scannerMetricLastRealtime {
p.latency[s].add(duration)
atomic.AddUint64(&p.operations[s], uint64(n))
if s < scannerMetricLastRealtime {
p.latency[s].add(duration)
}
}
}
}
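
Editor's note: the two variants of timeN above differ in when the elapsed time is recorded; the curried form starts the clock when timeN is called and only records once the innermost closure is invoked. A simplified standalone sketch of that two-stage closure (the print stands in for the metric update; names and types are illustrative, not MinIO's):

    package main

    import (
        "fmt"
        "time"
    )

    // timeN starts a timer immediately; the caller later supplies n and then
    // invokes the innermost closure once the measured work has finished.
    func timeN() func(n int) func() {
        start := time.Now()
        return func(n int) func() {
            return func() {
                fmt.Printf("processed %d items in %v\n", n, time.Since(start))
            }
        }
    }

    func main() {
        done := timeN()
        time.Sleep(10 * time.Millisecond) // stand-in for the measured work
        done(3)()                         // record the measurement for 3 items
    }
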
@@ -196,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
func (p *scannerMetrics) getCurrentPaths() []string {
var res []string
prefix := globalLocalNodeName + "/"
p.currentPaths.Range(func(key, value any) bool {
p.currentPaths.Range(func(key, value interface{}) bool {
// We are a bit paranoid, but better miss an entry than crash.
name, ok := key.(string)
if !ok {
@@ -219,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
// (since this is concurrent it may not be 100% reliable)
func (p *scannerMetrics) activeDrives() int {
var i int
p.currentPaths.Range(func(k, v any) bool {
p.currentPaths.Range(func(k, v interface{}) bool {
i++
return true
})
@@ -297,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
m.CollectedAt = time.Now()
m.ActivePaths = p.getCurrentPaths()
m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
for i := range scannerMetricLast {
for i := scannerMetric(0); i < scannerMetricLast; i++ {
if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
m.LifeTimeOps[i.String()] = n
}
@@ -307,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
}
m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
for i := range scannerMetricLastRealtime {
for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
lm := p.lastMinute(i)
if lm.N > 0 {
m.LastMinute.Actions[i.String()] = lm.asTimedAction()

View File

@@ -78,9 +78,11 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
// Run the data scanner in a loop
for {
runDataScanner(ctx, objAPI)
duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
duration = time.Second
}
time.Sleep(duration)
}
}()
@@ -330,7 +332,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, c
}
var skipHeal atomic.Bool
if !globalIsErasure || cache.Info.SkipHealing {
if globalIsErasure || cache.Info.SkipHealing {
skipHeal.Store(true)
}

View File

@@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
v2 uuid-2 modTime -3m
v1 uuid-1 modTime -4m
*/
for i := range 5 {
for i := 0; i < 5; i++ {
fivs[i] = FileInfo{
Volume: bucket,
Name: obj,

View File

@@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"path"
@@ -100,7 +99,9 @@ func (ats *allTierStats) clone() *allTierStats {
}
dst := *ats
dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
maps.Copy(dst.Tiers, ats.Tiers)
for tier, st := range ats.Tiers {
dst.Tiers[tier] = st
}
return &dst
}
@@ -346,7 +347,9 @@ func (e dataUsageEntry) clone() dataUsageEntry {
// We operate on a copy from the receiver.
if e.Children != nil {
ch := make(dataUsageHashMap, len(e.Children))
maps.Copy(ch, e.Children)
for k, v := range e.Children {
ch[k] = v
}
e.Children = ch
}
@@ -1221,11 +1224,11 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return err
return
}
if zb0002 == 0 {
*z = nil
return err
return
}
*z = make(dataUsageHashMap, zb0002)
for i := uint32(0); i < zb0002; i++ {
@@ -1234,12 +1237,12 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return err
return
}
(*z)[zb0003] = struct{}{}
}
}
return err
return
}
// EncodeMsg implements msgp.Encodable
@@ -1247,16 +1250,16 @@ func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return err
return
}
for zb0004 := range z {
err = en.WriteString(zb0004)
if err != nil {
err = msgp.WrapError(err, zb0004)
return err
return
}
}
return err
return
}
// MarshalMsg implements msgp.Marshaler
@@ -1266,7 +1269,7 @@ func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
for zb0004 := range z {
o = msgp.AppendString(o, zb0004)
}
return o, err
return
}
// UnmarshalMsg implements msgp.Unmarshaler
@@ -1275,7 +1278,7 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return o, err
return
}
if zb0002 == 0 {
*z = nil
@@ -1288,13 +1291,13 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return o, err
return
}
(*z)[zb0003] = struct{}{}
}
}
o = bts
return o, err
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
@@ -1303,7 +1306,7 @@ func (z dataUsageHashMap) Msgsize() (s int) {
for zb0004 := range z {
s += msgp.StringPrefixSize + len(zb0004)
}
return s
return
}
//msgp:encode ignore currentScannerCycle

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"time"
@@ -36,17 +36,19 @@ func (z *allTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 {
clear(z.Tiers)
for key := range z.Tiers {
delete(z.Tiers, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 tierStats
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Tiers")
return
}
var za0002 tierStats
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
@@ -205,12 +207,14 @@ func (z *allTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 {
clear(z.Tiers)
for key := range z.Tiers {
delete(z.Tiers, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 tierStats
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tiers")
@@ -411,17 +415,19 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntry
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntry
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -537,12 +543,14 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntry
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -791,17 +799,19 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV2
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV2
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -854,12 +864,14 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV2
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -930,17 +942,19 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV3
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV3
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -993,12 +1007,14 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV3
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -1069,17 +1085,19 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV4
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV4
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -1132,12 +1150,14 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV4
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -1208,17 +1228,19 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV5
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV5
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -1271,12 +1293,14 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV5
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -1347,17 +1371,19 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV6
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV6
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -1410,12 +1436,14 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV6
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -1486,17 +1514,19 @@ func (z *dataUsageCacheV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV7
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV7
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@@ -1549,12 +1579,14 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 {
clear(z.Cache)
for key := range z.Cache {
delete(z.Cache, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV7
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@@ -1713,17 +1745,19 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
clear(z.AllTierStats.Tiers)
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
return
}
var za0004 tierStats
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
@@ -2177,12 +2211,14 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
clear(z.AllTierStats.Tiers)
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
}
for zb0005 > 0 {
var za0003 string
var za0004 tierStats
zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
@@ -2948,17 +2984,19 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
clear(z.AllTierStats.Tiers)
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
return
}
var za0004 tierStats
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
@@ -3154,12 +3192,14 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
clear(z.AllTierStats.Tiers)
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
}
for zb0005 > 0 {
var za0003 string
var za0004 tierStats
zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")

View File

@@ -1,7 +1,7 @@
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"

View File

@@ -56,13 +56,13 @@ func TestDataUsageUpdate(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return sizeS, err
return
}
sizeS.totalSize = s.Size()
sizeS.versions++
return sizeS, nil
}
return sizeS, err
return
}
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {
@@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for range dataUsageUpdateDirCycles {
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
@@ -279,13 +279,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return sizeS, err
return
}
sizeS.totalSize = s.Size()
sizeS.versions++
return sizeS, err
return
}
return sizeS, err
return
}
weSleep := func() bool { return false }
@@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for range dataUsageUpdateDirCycles {
for i := 0; i < dataUsageUpdateDirCycles; i++ {
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
@@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
// generateUsageTestFiles create nFolders * nFiles files of size bytes each.
func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
pl := make([]byte, size)
for i := range nFolders {
for i := 0; i < nFolders; i++ {
name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
if err != nil {
t.Fatal(err)
}
for j := range nFiles {
for j := 0; j < nFiles; j++ {
name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
err = os.WriteFile(name, pl, os.ModePerm)
if err != nil {
@@ -569,13 +569,13 @@ func TestDataUsageCacheSerialize(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return sizeS, err
return
}
sizeS.versions++
sizeS.totalSize = s.Size()
return sizeS, err
return
}
return sizeS, err
return
}
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {
@@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
// equalAsJSON returns whether the values are equal when encoded as JSON.
func equalAsJSON(a, b any) bool {
func equalAsJSON(a, b interface{}) bool {
aj, err := json.Marshal(a)
if err != nil {
panic(err)

View File

@@ -87,7 +87,7 @@ func (d *DummyDataGen) Read(b []byte) (n int, err error) {
}
err = io.EOF
}
return n, err
return
}
func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {

View File

@@ -129,9 +129,12 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
if failPct > dynamicTimeoutIncreaseThresholdPct {
// We are hitting the timeout too often, so increase the timeout by 25%
timeout := min(
// Set upper cap.
atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
// Set upper cap.
if timeout > int64(maxDynamicTimeout) {
timeout = int64(maxDynamicTimeout)
}
// Safety, shouldn't happen
if timeout < dt.minimum {
timeout = dt.minimum
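
Editor's note: both forms in this hunk raise the timeout by 25% once failures cross the threshold, cap it at the configured maximum, and keep it above the minimum. A standalone sketch of that clamping arithmetic (the cap and floor constants here are placeholders, not the values used by MinIO):

    package main

    import (
        "fmt"
        "time"
    )

    const (
        maxDynamicTimeout = 24 * time.Hour // illustrative upper cap
        minimumTimeout    = time.Second    // illustrative lower bound
    )

    // bump increases a timeout by 25% while keeping it inside [minimumTimeout, maxDynamicTimeout].
    func bump(cur time.Duration) time.Duration {
        next := cur * 125 / 100
        if next > maxDynamicTimeout {
            next = maxDynamicTimeout
        }
        if next < minimumTimeout { // safety, shouldn't happen
            next = minimumTimeout
        }
        return next
    }

    func main() {
        fmt.Println(bump(10 * time.Second)) // 12.5s
    }
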

View File

@@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
initial := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogFailure()
}
@@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
initial := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogFailure()
}
adjusted := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogFailure()
}
@@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
initial := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogSuccess(20 * time.Second)
}
@@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
initial := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogSuccess(20 * time.Second)
}
adjusted := timeout.Timeout()
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogSuccess(20 * time.Second)
}
@@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
initial := timeout.Timeout()
const successTimeout = 20 * time.Second
for range 100 {
for range dynamicTimeoutLogSize {
for l := 0; l < 100; l++ {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogSuccess(successTimeout)
}
}
@@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
rng := rand.New(rand.NewSource(int64(i)))
go func() {
defer wg.Done()
for range 100 {
for range 100 {
for i := 0; i < 100; i++ {
for j := 0; j < 100; j++ {
timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
}
to := timeout.Timeout()
@@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
initial := timeout.Timeout()
const successTimeout = 20 * time.Second
for range 100 {
for range dynamicTimeoutLogSize {
for l := 0; l < 100; l++ {
for i := 0; i < dynamicTimeoutLogSize; i++ {
timeout.LogSuccess(successTimeout)
}
}
@@ -166,9 +166,13 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
const successTimeout = 20 * time.Second
for range dynamicTimeoutLogSize {
for i := 0; i < dynamicTimeoutLogSize; i++ {
rnd := f()
duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
duration := time.Duration(float64(successTimeout) * rnd)
if duration < 100*time.Millisecond {
duration = 100 * time.Millisecond
}
if duration >= time.Minute {
timeout.LogFailure()
} else {
@@ -184,7 +188,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
initial := timeout.Timeout()
for range 10 {
for try := 0; try < 10; try++ {
testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
}
@@ -201,7 +205,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
initial := timeout.Timeout()
for range 10 {
for try := 0; try < 10; try++ {
testDynamicTimeoutAdjust(t, timeout, func() float64 {
return 1.0 + rand.NormFloat64()
})

View File

@@ -29,7 +29,6 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"path"
"strconv"
@@ -38,6 +37,7 @@ import (
"github.com/minio/kms-go/kes"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
@@ -118,7 +118,10 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
names = make([]string, 0, BatchSize)
)
for len(objects) > 0 {
N := min(len(objects), BatchSize)
N := BatchSize
if len(objects) < BatchSize {
N = len(objects)
}
batch := objects[:N]
// We have to decrypt only ETags of SSE-S3 single-part
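
Editor's note: the surrounding loop decrypts ETags in chunks of at most BatchSize objects, with the last chunk allowed to be shorter. A generic sketch of that chunking pattern using the built-in min shown in the hunk (function and variable names are illustrative):

    package main

    import "fmt"

    // processInBatches walks a slice in fixed-size chunks; the final chunk may be shorter.
    func processInBatches(items []string, batchSize int, fn func(batch []string)) {
        for len(items) > 0 {
            n := min(len(items), batchSize) // built-in min, Go 1.21+
            fn(items[:n])
            items = items[n:]
        }
    }

    func main() {
        processInBatches([]string{"a", "b", "c", "d", "e"}, 2, func(b []string) {
            fmt.Println(b)
        })
    }
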
@@ -290,7 +293,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return err
}
sealedKey = objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, newKey, sealedKey)
return nil
case crypto.S3KMS:
if GlobalKMS == nil {
@@ -315,7 +318,9 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
// of the client provided context and add the bucket
// key, if not present.
kmsCtx := kms.Context{}
maps.Copy(kmsCtx, cryptoCtx)
for k, v := range cryptoCtx {
kmsCtx[k] = v
}
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
@@ -328,7 +333,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
}
sealedKey := objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
crypto.S3KMS.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey, cryptoCtx)
crypto.S3KMS.CreateMetadata(metadata, newKey, sealedKey, cryptoCtx)
return nil
case crypto.SSEC:
sealedKey, err := crypto.SSEC.ParseMetadata(metadata)
@@ -371,7 +376,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
crypto.S3.CreateMetadata(metadata, key, sealedKey)
return objectKey, nil
case crypto.S3KMS:
if GlobalKMS == nil {
@@ -385,7 +390,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
// of the client provided context and add the bucket
// key, if not present.
kmsCtx := kms.Context{}
maps.Copy(kmsCtx, cryptoCtx)
for k, v := range cryptoCtx {
kmsCtx[k] = v
}
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
@@ -402,7 +409,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
crypto.S3KMS.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey, cryptoCtx)
crypto.S3KMS.CreateMetadata(metadata, key, sealedKey, cryptoCtx)
return objectKey, nil
case crypto.SSEC:
objectKey := crypto.GenerateKey(key, rand.Reader)
@@ -420,7 +427,7 @@ func newEncryptReader(ctx context.Context, content io.Reader, kind crypto.Type,
return nil, crypto.ObjectKey{}, err
}
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
if err != nil {
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
}
@@ -450,7 +457,7 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
}
}
_, err = newEncryptMetadata(r.Context(), kind, keyID, key, bucket, object, metadata, kmsCtx)
return err
return
}
// EncryptRequest takes the client provided content and encrypts the data
@@ -563,6 +570,7 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
reader, err := sio.DecryptReader(client, sio.Config{
Key: objectEncryptionKey,
SequenceNumber: seqNumber,
CipherSuites: fips.DARECiphers(),
})
if err != nil {
return nil, crypto.ErrInvalidCustomerKey
@@ -855,7 +863,7 @@ func tryDecryptETag(key []byte, encryptedETag string, sses3 bool) string {
func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
err = errors.New("Object is not encrypted")
return encOff, encLength, skipLen, seqNumber, partStart, err
return
}
if rs == nil {
@@ -873,7 +881,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(part.Size))
if err != nil {
err = errObjectTampered
return encOff, encLength, skipLen, seqNumber, partStart, err
return
}
sizes[i] = int64(partSize)
decObjSize += int64(partSize)
@@ -883,7 +891,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(o.Size))
if err != nil {
err = errObjectTampered
return encOff, encLength, skipLen, seqNumber, partStart, err
return
}
sizes = []int64{int64(partSize)}
decObjSize = sizes[0]
@@ -892,7 +900,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
var off, length int64
off, length, err = rs.GetOffsetLength(decObjSize)
if err != nil {
return encOff, encLength, skipLen, seqNumber, partStart, err
return
}
// At this point, we have:
@@ -1054,7 +1062,7 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
var buffer bytes.Buffer
mac := hmac.New(sha256.New, key[:])
mac.Write([]byte(baseKey))
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil)}); err != nil {
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key"))
}
return buffer.Bytes()
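
Editor's note: the metadata encrypter above derives a per-item key by HMAC-SHA256ing a label with the object key, then seals the value with sio. A reduced, standalone round-trip sketch using the same sio.Encrypt and sio.DecryptBuffer calls that appear in these hunks (the key and label are placeholders; one side of the diff additionally sets CipherSuites from the fips package, which is omitted here):

    package main

    import (
        "bytes"
        "crypto/hmac"
        "crypto/sha256"
        "fmt"

        "github.com/minio/sio"
    )

    func main() {
        // Derive a 32-byte sub-key from a master key and a purpose label.
        var objectKey [32]byte // placeholder; in real use this is a random per-object key
        mac := hmac.New(sha256.New, objectKey[:])
        mac.Write([]byte("object-checksum")) // placeholder label
        subKey := mac.Sum(nil)

        plaintext := []byte("metadata to protect")

        var sealed bytes.Buffer
        if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{Key: subKey}); err != nil {
            panic(err)
        }

        opened, err := sio.DecryptBuffer(nil, sealed.Bytes(), sio.Config{Key: subKey})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(opened)) // metadata to protect
    }
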
@@ -1068,16 +1076,8 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
return input, nil
}
var key []byte
if crypto.SSECopy.IsRequested(h) {
sseCopyKey, err := crypto.SSECopy.ParseHTTP(h)
if err != nil {
return nil, err
}
key = sseCopyKey[:]
} else {
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
key = k[:]
}
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
key = k[:]
}
key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
if err != nil {
@@ -1085,12 +1085,11 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
}
mac := hmac.New(sha256.New, key)
mac.Write([]byte(baseKey))
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil)})
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
}
}
// decryptPartsChecksums will attempt to decrypt and decode part checksums, and save
// only the decrypted part checksum values on ObjectInfo directly.
// decryptPartsChecksums will attempt to decode checksums and return it/them if set.
// if part > 0, and we have the checksum for the part that will be returned.
func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
data := o.Checksum
@@ -1115,23 +1114,6 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
}
}
// decryptChecksum will attempt to decrypt the ObjectInfo.Checksum, returns the decrypted value
// An error is only returned if it was encrypted and the decryption failed.
func (o *ObjectInfo) decryptChecksum(h http.Header) ([]byte, error) {
data := o.Checksum
if len(data) == 0 {
return data, nil
}
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
if err != nil {
return nil, err
}
data = decrypted
}
return data, nil
}
// metadataEncryptFn provides an encryption function for metadata.
// Will return nil, nil if unencrypted.
func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn, error) {

View File

@@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
// Simple useful utilities
repeat = func(k int64, n int) []int64 {
a := []int64{}
for range n {
for i := 0; i < n; i++ {
a = append(a, k)
}
return a
@@ -471,7 +471,10 @@ func TestGetDecryptedRange(t *testing.T) {
// round up the lbPartOffset
// to the end of the
// corresponding DARE package
lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
if lbPkgEndOffset > v {
lbPkgEndOffset = v
}
bytesToDrop := v - lbPkgEndOffset
// Last segment to update `l`
@@ -483,7 +486,7 @@ func TestGetDecryptedRange(t *testing.T) {
cumulativeSum += v
cumulativeEncSum += getEncSize(v)
}
return o, l, skip, sn, ps
return
}
for i, test := range testMPs {

View File

@@ -22,7 +22,7 @@ import (
"fmt"
"net/url"
"runtime"
"slices"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
@@ -122,7 +122,9 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
// eyes that we prefer a sorted setCount slice for the
// subsequent function to figure out the right common
// divisor, it avoids loops.
slices.Sort(setCounts)
sort.Slice(setCounts, func(i, j int) bool {
return setCounts[i] < setCounts[j]
})
return setCounts
}
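
Editor's note: this hunk swaps between sort.Slice with a comparison callback and slices.Sort for ordered element types. A tiny sketch of the two equivalent calls (the sample slice is illustrative):

    package main

    import (
        "fmt"
        "slices"
        "sort"
    )

    func main() {
        a := []uint64{16, 4, 8}
        b := []uint64{16, 4, 8}

        // Pre-Go-1.21 form: comparison callback.
        sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })

        // Go 1.21+ form for ordered element types.
        slices.Sort(b)

        fmt.Println(a, b) // [4 8 16] [4 8 16]
    }
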
@@ -443,7 +445,7 @@ func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err err
layout: setArgs,
})
}
return layout, err
return
}
// mergeDisksLayoutFromArgs supports with and without ellipses transparently.
@@ -475,7 +477,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
legacy: true,
pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}},
}
return err
return
}
for _, arg := range args {
@@ -489,7 +491,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
}
ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs})
}
return err
return
}
// CreateServerEndpoints - validates and creates new endpoints from input args, supports

View File

@@ -55,6 +55,7 @@ func TestCreateServerEndpoints(t *testing.T) {
}
for i, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
srvCtxt := serverCtxt{}
err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
@@ -84,6 +85,7 @@ func TestGetDivisibleSize(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotGCD := getDivisibleSize(testCase.totalSizes)
if testCase.result != gotGCD {
@@ -170,6 +172,7 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
@@ -291,6 +294,7 @@ func TestGetSetIndexes(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
@@ -633,6 +637,7 @@ func TestParseEndpointSet(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotEs, err := parseEndpointSet(0, testCase.arg)
if err != nil && testCase.success {

View File

@@ -267,7 +267,7 @@ func (l EndpointServerPools) ESCount() (count int) {
for _, p := range l {
count += p.SetCount
}
return count
return
}
// GetNodes returns a sorted list of nodes in this cluster
@@ -297,7 +297,7 @@ func (l EndpointServerPools) GetNodes() (nodes []Node) {
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Host < nodes[j].Host
})
return nodes
return
}
// GetPoolIdx return pool index
@@ -588,7 +588,7 @@ func (endpoints Endpoints) GetAllStrings() (all []string) {
for _, e := range endpoints {
all = append(all, e.String())
}
return all
return
}
func hostResolveToLocalhost(endpoint Endpoint) bool {

View File

@@ -312,6 +312,7 @@ func TestCreateEndpoints(t *testing.T) {
}
for i, testCase := range testCases {
i := i
testCase := testCase
t.Run("", func(t *testing.T) {
var srvCtxt serverCtxt

View File

@@ -69,7 +69,7 @@ func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int
})
return enc
}
return e, err
return
}
// EncodeData encodes the given data and returns the erasure-coded data.
@@ -136,7 +136,10 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + length) / e.blockSize
tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
}
return tillOffset
}

View File

@@ -30,6 +30,7 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
var mu sync.Mutex
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) {
i := i
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
buf := &bytes.Buffer{}
// Verify erasure.Decode() for random offsets and lengths.
for range iterations {
for i := 0; i < iterations; i++ {
offset := r.Int63n(length)
readLen := r.Int63n(length - offset)
@@ -308,16 +308,17 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
b.Fatalf("failed to create erasure test file: %v", err)
}
for i := range dataDown {
for i := 0; i < dataDown; i++ {
writers[i] = nil
}
for i := data; i < data+parityDown; i++ {
writers[i] = nil
}
b.ResetTimer()
b.SetBytes(size)
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if writers[index] == nil {

View File

@@ -172,16 +172,17 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
content := make([]byte, size)
for i := range dataDown {
for i := 0; i < dataDown; i++ {
disks[i] = OfflineDisk
}
for i := data; i < data+parityDown; i++ {
disks[i] = OfflineDisk
}
b.ResetTimer()
b.SetBytes(size)
b.ReportAllocs()
for b.Loop() {
for i := 0; i < b.N; i++ {
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == OfflineDisk {

View File

@@ -102,7 +102,7 @@ func TestErasureHeal(t *testing.T) {
// setup stale disks for the test case
staleDisks := make([]StorageAPI, len(disks))
copy(staleDisks, disks)
for j := range staleDisks {
for j := 0; j < len(staleDisks); j++ {
if j < test.offDisks {
readers[j] = nil
} else {

Some files were not shown because too many files have changed in this diff.