mirror of https://github.com/minio/minio.git
synced 2026-02-12 13:50:15 -05:00

Compare commits: key-versio...RELEASE.20 (60 commits)

Commit SHA1s:
d0f50cdd9b, da532ab93d, 558fc1c09c, 9fdbf6fe83, 5c87d4ae87, f0b91e5504, 3b7cb6512c, 4ea6f3b06b,
86d9d9b55e, 5a35585acd, 0848e69602, 02ba581ecf, b44b2a090c, c7d6a9722d, a8abdc797e, 0638ccc5f3,
b1a34fd63f, ffcfa36b13, 376fbd11a7, c76f209ccc, 7a6a2256b1, d002beaee3, 71f293d9ab, e3d183b6a4,
752abc2e2c, b9f0e8c712, 7ced9663e6, 50fcf9b670, 64f5c6103f, e909be6380, 83b2ad418b, 7a64bb9766,
34679befef, 4021d8c8e2, de234b888c, 2718d9a430, a65292cab1, e0c79be251, a6c538c5a1, e1fcaebc77,
21409f112d, 417c8648f0, e2245a0b12, b4b3d208dd, 0a36d41dcd, ea77bcfc98, 9f24ca5d66, 816666a4c6,
2c7fe094d1, 9ebe168782, ee2028cde6, ecde75f911, 12a6ea89cc, 63e102c049, 160f8a901b, ef9b03fbf5,
1d50cae43d, c0a33952c6, 8cad40a483, 6d18dba9a2
.github/workflows/go-fips.yml (vendored, 59 lines)
@@ -1,59 +0,0 @@
name: FIPS Build Test

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build:
    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.24.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Setup dockerfile for build test
        run: |
          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
          echo Detected go version $GO_VERSION
          cat > Dockerfile.fips.test <<EOF
          FROM golang:${GO_VERSION}
          COPY . /minio
          WORKDIR /minio
          ENV GOEXPERIMENT=boringcrypto
          RUN make
          EOF

      - name: Build
        uses: docker/build-push-action@v3
        with:
          context: .
          file: Dockerfile.fips.test
          push: false
          load: true
          tags: minio/fips-test:latest

      # This should fail if grep returns non-zero exit
      - name: Test binary
        run: |
          docker run --rm minio/fips-test:latest ./minio --version
          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'
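The deleted workflow's final step validated the FIPS build by scanning the binary's symbol table for FIPS/BoringCrypto symbols. A minimal Go sketch of that same check, assuming `go` is on PATH and the binary path is passed as an argument; the helper name and flow here are illustrative, not part of the repository:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// main runs `go tool nm` on the given binary and reports whether any
// FIPS/BoringCrypto symbols are present, mirroring the grep check in
// the removed workflow.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: fipscheck <binary>")
		os.Exit(2)
	}
	out, err := exec.Command("go", "tool", "nm", os.Args[1]).Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, "nm failed:", err)
		os.Exit(1)
	}
	if strings.Contains(string(out), "FIPS") {
		fmt.Println("FIPS/BoringCrypto symbols found")
		return
	}
	fmt.Println("no FIPS symbols found")
	os.Exit(1)
}
```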
PULL_REQUESTS_ETIQUETTE.md (new file, 93 lines)
@@ -0,0 +1,93 @@
# MinIO Pull Request Guidelines

These guidelines ensure high-quality commits in MinIO’s GitHub repositories, maintaining
a clear, valuable commit history for our open-source projects. They apply to all contributors,
fostering efficient reviews and robust code.

## Why Pull Requests?

Pull Requests (PRs) drive quality in MinIO’s codebase by:

- Enabling peer review without pair programming.
- Documenting changes for future reference.
- Ensuring commits tell a clear story of development.

**A poor commit lasts forever, even if code is refactored.**

## Crafting a Quality PR

A strong MinIO PR:

- Delivers a complete, valuable change (feature, bug fix, or improvement).
- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`).
- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”).
- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work.
- Adheres to MinIO’s coding standards (e.g., Go style, error handling, testing).

PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones.

## Submitting PRs

1. **Title and Summary**:
   - Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`).
   - Include context in the summary: what changed, why, and any issue references.
   - Use `[WIP]` for in-progress PRs to avoid premature merging, or choose GitHub draft PRs.

2. **Commits**:
   - Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”).
   - Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch.
   - Keep PRs focused—one feature or fix. Split large changes into multiple PRs.

3. **Testing**:
   - Include unit tests for new functionality or bug fixes.
   - Ensure existing tests pass (`make test`).
   - Document testing steps in the PR summary if manual testing was performed.

4. **Before Submitting**:
   - Run `make verify` to check formatting, linting, and tests.
   - Reference related issues (e.g., “Closes #1234”).
   - Notify team members via GitHub `@mentions` if urgent or complex.

## Reviewing PRs

Reviewers ensure MinIO’s commit history remains a clear, reliable record. Responsibilities include:

1. **Commit Quality**:
   - Verify each commit explains *why* the change was made (e.g., “So that…”).
   - Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”).

2. **Code Quality**:
   - Check adherence to MinIO’s Go standards (e.g., error handling, documentation).
   - Ensure tests cover new code and pass CI.
   - Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues.

3. **Flow**:
   - Review promptly to avoid blocking progress.
   - Balance quality and speed—minor issues can be addressed later via issues, not PR blocks.
   - If unable to complete the review, tag another reviewer (e.g., `@username please take over`).

4. **Shared Responsibility**:
   - All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate.
   - Multiple reviewers are encouraged for complex PRs.

5. **No Self-Edits**:
   - Don’t modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR.
   - If you edit, you’re a collaborator, not a reviewer, and cannot merge.

6. **Testing**:
   - Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”).
   - Reject untested PRs unless testing is infeasible, then assist with test setup.

## Tips for Success

- **Small PRs**: Easier to review, faster to merge. Split large changes logically.
- **Clear Commits**: Use `git rebase -i` to refine history before submitting.
- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding.
- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving.
- **Learn from Reviews**: Use feedback to improve future contributions.

## Resources

- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/)
- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request)

By following these guidelines, we ensure MinIO’s codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding!
@@ -1,7 +0,0 @@
|
||||
# MinIO FIPS Builds
|
||||
|
||||
MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).
|
||||
|
||||
MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.
|
||||
|
||||
We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
|
||||
README.md (61 lines changed)

@@ -4,7 +4,13 @@
[](https://min.io)

MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license. Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance.

🔹 S3 API Compatible – Seamless integration with existing S3 tools
🔹 Built for AI & Analytics – Optimized for large-scale data pipelines
🔹 High Performance – Ideal for demanding storage workloads.

AI storage documentation (https://min.io/solutions/object-storage-for-ai).

This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

@@ -14,7 +20,7 @@ Use the following commands to run a standalone MinIO server as a container.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
for more complete documentation.

### Stable

@@ -32,15 +38,17 @@ root credentials. You can use the Browser to create buckets, upload objects, and

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
> [!NOTE]
> To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option.
> For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
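For the SDK pointer above, a minimal Go sketch of connecting to a local deployment with the `minio-go` client; the endpoint and the default `minioadmin:minioadmin` root credentials follow the quickstart in this README, and the bucket name is purely illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Connect to the local quickstart deployment (default root credentials).
	client, err := minio.New("127.0.0.1:9000", &minio.Options{
		Creds:  credentials.NewStaticV4("minioadmin", "minioadmin", ""),
		Secure: false,
	})
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	// Create an example bucket, then list what exists.
	if err := client.MakeBucket(ctx, "example-bucket", minio.MakeBucketOptions{}); err != nil {
		log.Println("make bucket:", err) // the bucket may already exist
	}
	buckets, err := client.ListBuckets(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	for _, b := range buckets {
		log.Println(b.Name, b.CreationDate)
	}
}
```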
## macOS

Use the following commands to run a standalone MinIO server on macOS.

Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.

### Homebrew (recommended)

@@ -51,7 +59,8 @@ brew install minio/stable/minio
minio server /data
```

> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
> [!NOTE]
> If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.

```sh
brew uninstall minio

@@ -60,7 +69,7 @@ brew install minio/stable/minio

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.

### Binary Download

@@ -74,7 +83,7 @@ chmod +x minio

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

## GNU/Linux

@@ -96,9 +105,10 @@ The following table lists supported architectures. Replace the `wget` URL with t

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.

## Microsoft Windows

@@ -116,9 +126,10 @@ minio.exe server D:\

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.

## Install from Source

@@ -130,9 +141,10 @@ go install github.com/minio/minio@latest

The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

@@ -170,7 +182,8 @@ This command gets the active zone(s). Now, apply port rules to the relevant zone
firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

Note that `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.
> [!NOTE]
> `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.

```sh
firewall-cmd --reload

@@ -199,7 +212,8 @@ service iptables restart

MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.

> NOTE: MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.
> [!NOTE]
> MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.

### Things to consider

@@ -215,15 +229,16 @@ For example, consider a MinIO deployment behind a proxy `https://minio.example.n

## Test using MinIO Client `mc`

`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) for further instructions.

## Upgrading MinIO

Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all transactions on MinIO are atomic. So upgrading all the servers simultaneously is the recommended way to upgrade MinIO.

> NOTE: requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>
> [!NOTE]
> requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin/mc-admin-update.html)

```sh
mc admin update <minio alias, e.g., myminio>

@@ -243,10 +258,10 @@ mc admin update <minio alias, e.g., myminio>

## Explore Further

- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html)
- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)

## Contribute to MinIO Project
@@ -74,11 +74,11 @@ check_minimum_version() {

assert_is_supported_arch() {
    case "${ARCH}" in
        x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
        x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
            return
            ;;
        *)
            echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
            echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
            exit 1
            ;;
    esac
@@ -9,7 +9,7 @@ function _init() {
    export CGO_ENABLED=0

    ## List of architectures and OS to test cross compilation.
    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
}

function _build() {
@@ -445,8 +445,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
    for _, svc := range serviceAccounts {
        expiryTime := svc.Expiration
        serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
            AccessKey: svc.AccessKey,
            Expiration: &expiryTime,
            AccessKey:   svc.AccessKey,
            Expiration:  &expiryTime,
            Name:        svc.Name,
            Description: svc.Description,
        })
    }
    for _, sts := range stsKeys {

@@ -625,8 +627,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
    }
    for _, svc := range serviceAccounts {
        accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
            AccessKey: svc.AccessKey,
            Expiration: &svc.Expiration,
            AccessKey:   svc.AccessKey,
            Expiration:  &svc.Expiration,
            Name:        svc.Name,
            Description: svc.Description,
        })
    }
    // if only service accounts, skip if user has no service accounts

@@ -173,6 +173,8 @@ func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *htt
        if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
            continue // skip if no roleArn and no policy claim
        }
        // claim-based provider is in the roleArnMap under dummy ARN
        arn = dummyRoleARN
    }
    matchingCfgName, ok := roleArnMap[arn]
    if !ok {

@@ -61,7 +61,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
        return
    }

    if z.IsRebalanceStarted() {
    if z.IsRebalanceStarted(ctx) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
        return
    }

@@ -277,7 +277,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
        return
    }

    if pools.IsRebalanceStarted() {
    if pools.IsRebalanceStarted(ctx) {
        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
        return
    }

@@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
    }
}

func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
    data, err := io.ReadAll(body)
    if err != nil {
        return SRError{
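Many hunks in this compare replace `interface{}` with `any`, the predeclared alias introduced in Go 1.18; the two spellings are interchangeable. A small illustrative sketch (the names here are examples, not MinIO code):

```go
package main

import "fmt"

// describe accepts any value; `any` is an alias for interface{},
// so this signature is identical to describe(v interface{}).
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	values := []any{42, "hello", 3.14}
	for _, v := range values {
		fmt.Println(describe(v))
	}
}
```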
@@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {

    // Create a policy policy
    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
    ]
    }
    ]
    }`, bucket))
    }`, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)
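The recurring `[]byte(fmt.Sprintf(...))` → `fmt.Appendf(nil, ...)` rewrite uses `fmt.Appendf` (Go 1.19), which formats directly into a byte slice and skips the intermediate string. A small equivalence sketch with an illustrative bucket name:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	bucket := "example-bucket"

	// Old style: format to a string, then convert to []byte (extra allocation).
	oldBytes := []byte(fmt.Sprintf(`{"Resource": "arn:aws:s3:::%s/*"}`, bucket))

	// New style: append the formatted output to a (nil) byte slice directly.
	newBytes := fmt.Appendf(nil, `{"Resource": "arn:aws:s3:::%s/*"}`, bucket)

	fmt.Println(bytes.Equal(oldBytes, newBytes)) // true
}
```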
@@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
    userCount := 50
    accessKeys := make([]string, userCount)
    secretKeys := make([]string, userCount)
    for i := 0; i < userCount; i++ {
    for i := range userCount {
        accessKey, secretKey := mustGenerateCredentials(c)
        err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
        if err != nil {
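Counted loops of the form `for i := 0; i < n; i++` are rewritten throughout as `for i := range n`, the integer-range form added in Go 1.22 (with `for range n` when the index is unused). A minimal sketch:

```go
package main

import "fmt"

func main() {
	userCount := 3

	// Classic counted loop.
	for i := 0; i < userCount; i++ {
		fmt.Println("classic", i)
	}

	// Go 1.22+: ranging over an integer yields 0, 1, ..., userCount-1.
	for i := range userCount {
		fmt.Println("range", i)
	}

	// When the index is not needed, drop it entirely.
	for range userCount {
		fmt.Println("tick")
	}
}
```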
@@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
    }

    g := errgroup.Group{}
    for i := 0; i < userCount; i++ {
    for i := range userCount {
        g.Go(func(i int) func() error {
            return func() error {
                uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")

@@ -24,6 +24,7 @@ import (
    "errors"
    "fmt"
    "io"
    "maps"
    "net/http"
    "os"
    "slices"

@@ -157,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
        return
    }
    for k, v := range ldapUsers {
        allCredentials[k] = v
    }
    maps.Copy(allCredentials, ldapUsers)

    // Marshal the response
    data, err := json.Marshal(allCredentials)
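Hand-written map copy loops are collapsed into `maps.Copy` from the standard `maps` package (Go 1.21), which copies every key/value pair from the source map into the destination, overwriting existing keys. A small equivalence sketch with illustrative data:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	allCredentials := map[string]string{"builtin-user": "enabled"}
	ldapUsers := map[string]string{"ldap-user": "enabled"}

	// Old style: explicit copy loop.
	for k, v := range ldapUsers {
		allCredentials[k] = v
	}

	// New style: maps.Copy(dst, src) does the same in one call.
	maps.Copy(allCredentials, ldapUsers)

	fmt.Println(allCredentials)
}
```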
@@ -1827,16 +1826,18 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
            iamLogIf(ctx, err)
        } else if foundGroupDN == nil || !underBaseDN {
            err = errNoSuchGroup
        } else {
            entityName = foundGroupDN.NormDN
        }
            entityName = foundGroupDN.NormDN
    } else {
        var foundUserDN *xldap.DNSearchResult
        if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
            iamLogIf(ctx, err)
        } else if foundUserDN == nil {
            err = errNoSuchUser
        } else {
            entityName = foundUserDN.NormDN
        }
            entityName = foundUserDN.NormDN
    }
    if err != nil {
        writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -2947,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
        name:        createReq.Name,
        description: description,
        expiration:  createReq.Expiration,
        claims:      make(map[string]interface{}),
        claims:      make(map[string]any),
    }

    condValues := getConditionValues(r, "", cred)

@@ -332,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {

    // 2.2 create and associate policy to user
    policy := "mypolicy-test-user-update"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -355,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -562,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {

    // 1. Create a policy
    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -585,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -680,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
        c.Fatalf("bucket creat error: %v", err)
    }

    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -703,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)

    // Check that default policies can be overwritten.
    err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)

@@ -739,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
    }

    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -762,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -911,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {

    // Create policy, user and associate policy
    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -934,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -995,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {

    // Create policy, user and associate policy
    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -1026,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -1093,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {

    // Create policy, user and associate policy
    policy := "mypolicy"
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -1116,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
    ]
    }
    ]
    }`, bucket, bucket))
    }`, bucket, bucket)
    err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
    if err != nil {
        c.Fatalf("policy add error: %v", err)

@@ -1367,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
    svcAK, svcSK := mustGenerateCredentials(c)

    // This policy does not allow listing objects.
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -1381,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
    ]
    }
    ]
    }`, bucket))
    }`, bucket)
    cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
        Policy:     policyBytes,
        TargetUser: accessKey,

@@ -1558,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
    c.Helper()
    versions := []string{}
    for i := 0; i < 5; i++ {
    for range 5 {
        ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
        if err != nil {
            c.Fatalf("upload did not succeed got %#v", err)

@@ -1627,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    svcAK, svcSK := mustGenerateCredentials(c)

    // This policy does not allow listing objects.
    policyBytes := []byte(fmt.Sprintf(`{
    policyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -1641,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    ]
    }
    ]
    }`, bucket))
    }`, bucket)
    cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
        Policy:     policyBytes,
        TargetUser: accessKey,

@@ -1655,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    c.mustNotListObjects(ctx, svcClient, bucket)

    // This policy allows listing objects.
    newPolicyBytes := []byte(fmt.Sprintf(`{
    newPolicyBytes := fmt.Appendf(nil, `{
    "Version": "2012-10-17",
    "Statement": [
    {

@@ -1668,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    ]
    }
    ]
    }`, bucket))
    }`, bucket)
    err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
        NewPolicy: newPolicyBytes,
    })

@@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ

    var args dsync.LockArgs
    var lockers []dsync.NetLocker
    for _, path := range strings.Split(vars["paths"], ",") {
    for path := range strings.SplitSeq(vars["paths"], ",") {
        if path == "" {
            continue
        }
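`strings.Split` followed by a range loop is replaced above with `strings.SplitSeq` (Go 1.24), which yields the substrings through an iterator instead of allocating a slice. A minimal sketch of the two forms with illustrative input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	paths := "bucket/a,bucket/b,,bucket/c"

	// Old style: allocate a []string, then iterate over it.
	for _, p := range strings.Split(paths, ",") {
		if p == "" {
			continue
		}
		fmt.Println("split:", p)
	}

	// Go 1.24: SplitSeq yields each substring lazily, avoiding the
	// intermediate slice allocation.
	for p := range strings.SplitSeq(paths, ",") {
		if p == "" {
			continue
		}
		fmt.Println("splitseq:", p)
	}
}
```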
@@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
    mode    os.FileMode
    modTime time.Time
    isDir   bool
    sys     interface{}
    sys     any
}

func (f dummyFileInfo) Name() string { return f.name }

@@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
func (f dummyFileInfo) Sys() any { return f.sys }

// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------

@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
func TestTopLockEntries(t *testing.T) {
    locksHeld := make(map[string][]lockRequesterInfo)
    var owners []string
    for i := 0; i < 4; i++ {
    for i := range 4 {
        owners = append(owners, fmt.Sprintf("node-%d", i))
    }

@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
    // request UID, but 10 different resource names associated with it.
    var lris []lockRequesterInfo
    uuid := mustGetUUID()
    for i := 0; i < 10; i++ {
    for i := range 10 {
        resource := fmt.Sprintf("bucket/delete-object-%d", i)
        lri := lockRequesterInfo{
            Name: resource,

@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
    }

    // Add a few concurrent read locks to the mix
    for i := 0; i < 50; i++ {
    for i := range 50 {
        resource := fmt.Sprintf("bucket/get-object-%d", i)
        lri := lockRequesterInfo{
            Name: resource,

@@ -22,6 +22,7 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "maps"
    "net/http"
    "sort"
    "sync"

@@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {

    // Make a copy before returning the value
    retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
    for k, v := range h.scannedItemsMap {
        retMap[k] = v
    }
    maps.Copy(retMap, h.scannedItemsMap)

    return retMap
}

@@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {

    // Make a copy before returning the value
    retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
    for k, v := range h.healedItemsMap {
        retMap[k] = v
    }
    maps.Copy(retMap, h.healedItemsMap)

    return retMap
}

@@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {

    // Make a copy before returning the value
    retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
    for k, v := range h.healFailedItemsMap {
        retMap[k] = v
    }
    maps.Copy(retMap, h.healFailedItemsMap)

    return retMap
}

@@ -23,6 +23,7 @@ import (
    "encoding/json"
    "encoding/xml"
    "fmt"
    "mime"
    "net/http"
    "strconv"
    "strings"

@@ -64,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
}

// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
func encodeResponse(response any) []byte {
    var buf bytes.Buffer
    buf.WriteString(xml.Header)
    if err := xml.NewEncoder(&buf).Encode(response); err != nil {

@@ -82,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
// Do not use this function for anything other than ListObjects()
// variants, please open a github discussion if you wish to use
// this in other places.
func encodeResponseList(response interface{}) []byte {
func encodeResponseList(response any) []byte {
    var buf bytes.Buffer
    buf.WriteString(xxml.Header)
    if err := xxml.NewEncoder(&buf).Encode(response); err != nil {

@@ -93,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
}

// Encodes the response headers into JSON format.
func encodeResponseJSON(response interface{}) []byte {
func encodeResponseJSON(response any) []byte {
    var bytesBuffer bytes.Buffer
    e := json.NewEncoder(&bytesBuffer)
    e.Encode(response)

@@ -168,6 +169,32 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
        if !stringsHasPrefixFold(k, userMetadataPrefix) {
            continue
        }
        // check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
        // For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
        // funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
        // example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
        // of =?UTF-8?B?w7Y=?=. This mirrors errors like the ä½ in another string.
        //
        // S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
        // (quoted-printable) for mostly ASCII strings. Long strings are split at word
        // boundaries to fit RFC 2047’s 75-character limit, ensuring HTTP parser
        // compatibility.
        //
        // However, this splitting increases header size and can introduce errors, unlike Go’s
        // mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
        // avoiding S3’s heuristic-driven issues.
        //
        // For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
        // report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
        // compatibility.
        if needsMimeEncoding(v) {
            // see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
            if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
                v = mime.BEncoding.Encode("UTF-8", v)
            } else {
                v = mime.QEncoding.Encode("UTF-8", v)
            }
        }
        w.Header()[strings.ToLower(k)] = []string{v}
        isSet = true
        break

@@ -229,3 +256,14 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object

    return nil
}

// needsEncoding reports whether s contains any bytes that need to be encoded.
// see mime.needsEncoding
func needsMimeEncoding(s string) bool {
    for _, b := range s {
        if (b < ' ' || b > '~') && b != '\t' {
            return true
        }
    }
    return false
}
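The new metadata path above encodes non-ASCII user metadata as RFC 2047 encoded-words via Go's `mime` package, picking B-encoding when the value contains the listed special characters and Q-encoding otherwise. A small sketch of that choice and of decoding with `mime.WordDecoder`; the sample values are illustrative and the helper is not MinIO code:

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// encodeMetadataValue mirrors the selection made in setObjectHeaders:
// B-encoding (Base64) for values containing RFC 2047 specials,
// Q-encoding (quoted-printable) for everything else.
func encodeMetadataValue(v string) string {
	if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
		return mime.BEncoding.Encode("UTF-8", v)
	}
	return mime.QEncoding.Encode("UTF-8", v)
}

func main() {
	dec := new(mime.WordDecoder)
	for _, v := range []string{"ö", "ÄMÄZÕÑ S3", "öha, das sollte eigentlich funktionieren"} {
		enc := encodeMetadataValue(v)
		back, err := dec.DecodeHeader(enc)
		if err != nil {
			fmt.Println("decode error:", err)
			continue
		}
		fmt.Printf("%q -> %q -> %q\n", v, enc, back)
	}
}
```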
@@ -100,7 +100,6 @@ func TestObjectLocation(t *testing.T) {
        },
    }
    for _, testCase := range testCases {
        testCase := testCase
        t.Run("", func(t *testing.T) {
            gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
            if testCase.expectedLocation != gotLocation {

@@ -387,6 +387,11 @@ func registerAPIRouter(router *mux.Router) {
        HeadersRegexp(xhttp.AmzSnowballExtract, "true").
        HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag))

    // AppendObject to be rejected
    router.Methods(http.MethodPut).Path("/{object:.+}").
        HeadersRegexp(xhttp.AmzWriteOffsetBytes, "").
        HandlerFunc(s3APIMiddleware(errorResponseHandler))

    // PutObject
    router.Methods(http.MethodPut).Path("/{object:.+}").
        HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag))

@@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
// - Force encoding of '~'
func s3URLEncode(s string) string {
    spaceCount, hexCount := 0, 0
    for i := 0; i < len(s); i++ {
    for i := range len(s) {
        c := s[i]
        if shouldEscape(c) {
            if c == ' ' {

@@ -70,7 +70,7 @@ func s3URLEncode(s string) string {

    if hexCount == 0 {
        copy(t, s)
        for i := 0; i < len(s); i++ {
        for i := range len(s) {
            if s[i] == ' ' {
                t[i] = '+'
            }

@@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
    }

    j := 0
    for i := 0; i < len(s); i++ {
    for i := range len(s) {
        switch c := s[i]; {
        case c == ' ':
            t[j] = '+'

@@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {

// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
func mustGetClaimsFromToken(r *http.Request) map[string]any {
    claims, _ := getClaimsFromToken(getSessionToken(r))
    return claims
}

@@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
}

// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
func getClaimsFromToken(token string) (map[string]any, error) {
    jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
    if err != nil {
        return nil, err

@@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
}

// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
    token := getSessionToken(r)
    if token != "" && cred.AccessKey == "" {
        // x-amz-security-token is not allowed for anonymous access.

@@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
    bgSeq := newBgHealSequence()
    // Run the background healer
    for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
    for range globalBackgroundHealRoutine.workers {
        go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
    }

@@ -24,6 +24,7 @@ import (
    "fmt"
    "io"
    "os"
    "slices"
    "sort"
    "strings"
    "sync"

@@ -269,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
func (h *healingTracker) isHealed(bucket string) bool {
    h.mu.RLock()
    defer h.mu.RUnlock()
    for _, v := range h.HealedBuckets {
        if v == bucket {
            return true
        }
    }
    return false
    return slices.Contains(h.HealedBuckets, bucket)
}

// resume will reset progress to the numbers at the start of the bucket.
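The membership loop in `isHealed` collapses to `slices.Contains` from the standard `slices` package (Go 1.21). A minimal before/after sketch with illustrative data:

```go
package main

import (
	"fmt"
	"slices"
)

// containsLoop is the hand-written membership test being replaced.
func containsLoop(healedBuckets []string, bucket string) bool {
	for _, v := range healedBuckets {
		if v == bucket {
			return true
		}
	}
	return false
}

func main() {
	healed := []string{"photos", "logs", "backups"}

	fmt.Println(containsLoop(healed, "logs"))              // true
	fmt.Println(slices.Contains(healed, "logs"))           // true, same result
	fmt.Println(slices.Contains(healed, "missing-bucket")) // false
}
```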
@@ -25,6 +25,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -248,7 +249,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
||||
pInfo PartInfo
|
||||
)
|
||||
|
||||
for i := 0; i < partsCount; i++ {
|
||||
for i := range partsCount {
|
||||
gopts := minio.GetObjectOptions{
|
||||
VersionID: srcObjInfo.VersionID,
|
||||
PartNumber: i + 1,
|
||||
@@ -574,9 +575,7 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
|
||||
oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
|
||||
}
|
||||
|
||||
for k, v := range objInfo.UserMetadata {
|
||||
oi.UserDefined[k] = v
|
||||
}
|
||||
maps.Copy(oi.UserDefined, objInfo.UserMetadata)
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
|
||||
type BatchJobSize int64
|
||||
|
||||
// UnmarshalYAML to parse humanized byte values
|
||||
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
|
||||
var batchExpireSz string
|
||||
err := unmarshal(&batchExpireSz)
|
||||
if err != nil {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"runtime"
|
||||
@@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
|
||||
}
|
||||
}
|
||||
e.kmsContext = kms.Context{}
|
||||
for k, v := range ctx {
|
||||
e.kmsContext[k] = v
|
||||
}
|
||||
maps.Copy(e.kmsContext, ctx)
|
||||
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
|
||||
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
|
||||
return err
|
||||
@@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
|
||||
// Since we are rotating the keys, make sure to update the metadata.
|
||||
oi.metadataOnly = true
|
||||
oi.keyRotation = true
|
||||
for k, v := range encMetadata {
|
||||
oi.UserDefined[k] = v
|
||||
}
|
||||
maps.Copy(oi.UserDefined, encMetadata)
|
||||
if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
|
||||
VersionID: oi.VersionID,
|
||||
}, ObjectOptions{
|
||||
|
||||
@@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
|
||||
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
|
||||
b.ReportAllocs()
|
||||
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for i := 0; b.Loop(); i++ {
|
||||
// insert the object.
|
||||
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
|
||||
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
|
||||
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {

for i := 0; b.Loop(); i++ {
// insert the object.
totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
for j := 0; j < totalPartsNR; j++ {
for j := range totalPartsNR {
if j < totalPartsNR-1 {
textPartData = textData[j*partSize : (j+1)*partSize-1]
} else {

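The two benchmark hunks above swap the classic `for i := 0; i < b.N; i++` loop for `b.Loop()`, added to the `testing` package in Go 1.24. A small illustrative sketch follows; the benchmarked work is a placeholder, not MinIO code, and per its documentation `b.Loop` keeps iterating until the framework has enough samples while excluding setup done before the loop from the measured time.

```go
package bench // place in a *_test.go file and run with: go test -bench .

import (
	"strconv"
	"testing"
)

// BenchmarkItoa shows the b.Loop style adopted in the diff above.
func BenchmarkItoa(b *testing.B) {
	b.ReportAllocs()
	data := 12345 // setup before the loop is not measured

	i := 0
	for b.Loop() {
		_ = strconv.Itoa(data + i)
		i++
	}
}
```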
@@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)

for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
}, index)
@@ -1089,6 +1088,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
break
}

// check if we have a file
if reader == nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}

if keyName, ok := formValues["Key"]; !ok {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
@@ -1379,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Set the correct hex md5sum for the fan-out stream.
fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))

concurrentSize := 100
if runtime.GOMAXPROCS(0) < concurrentSize {
concurrentSize = runtime.GOMAXPROCS(0)
}
concurrentSize := min(runtime.GOMAXPROCS(0), 100)

fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
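The hunk above folds a compare-and-clamp block into the generic `min` built-in (available since Go 1.21, alongside `max`). A minimal sketch of the same clamping idea; the cap of 100 mirrors the value used in the diff.

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Use at most 100 workers, or fewer if fewer CPUs are available.
	concurrentSize := min(runtime.GOMAXPROCS(0), 100)
	fmt.Println("fan-out concurrency:", concurrentSize)
}
```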
@@ -1653,9 +1657,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
if s3Error := checkRequestAuthType(ctx, r, policy.HeadBucketAction, bucket, ""); s3Error != ErrNone {
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
}

getBucketInfo := objectAPI.GetBucketInfo

@@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa

sha256sum := ""
var objectNames []string
for i := 0; i < 10; i++ {
for i := range 10 {
contentBytes := []byte("hello")
objectName := "test-object-" + strconv.Itoa(i)
if i == 0 {
@@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa

// The following block will create a bucket policy with delete object to 'public/*'. This is
// to test a mixed response of a successful & failure while deleting objects in a single request
policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
credentials.AccessKey, credentials.SecretKey, nil)

@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
@@ -959,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
UserDefined: meta,
}
}
for k, v := range objInfo.UserDefined {
meta[k] = v
}
maps.Copy(meta, objInfo.UserDefined)
if len(objInfo.UserTags) != 0 {
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
}

@@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
return meta, reloaded, nil
}

val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
@@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
g := errgroup.WithNErrs(len(buckets))
bucketMetas := make([]BucketMetadata, len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
// Sleep and stagger to avoid blocked CPU and thundering
// herd upon start up sequence.

@@ -38,7 +38,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
@@ -555,8 +554,8 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
outbuf := bytes.NewBuffer(nil)
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
crypto.S3.CreateMetadata(metadata, key, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
if err != nil {
return output, metabytes, err
}
@@ -590,6 +589,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
}

outbuf := bytes.NewBuffer(nil)
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
return outbuf.Bytes(), err
}

@@ -297,6 +297,9 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
if legalHoldPermErr != ErrNone {
return mode, retainDate, legalHold, legalHoldPermErr
}
}

if retentionRequested {

@@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
var wg sync.WaitGroup
var mu sync.Mutex
wg.Add(n)
for i := 0; i < n; i++ {
for range n {
go func() {
defer wg.Done()
// Sync start.
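The loop rewrites in this and several later hunks rely on Go 1.22's range-over-integer form: `for i := range n` counts i from 0 through n-1, and the index can be dropped entirely when it is unused. A tiny standalone sketch:

```go
package main

import "fmt"

func main() {
	n := 3
	for i := range n { // i takes the values 0, 1, 2
		fmt.Println("worker", i)
	}
	for range n { // run the body n times without an index
		fmt.Println("tick")
	}
}
```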
@@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Test case - 1.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),

policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting StatusBadRequest (400).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),

policyLen: maxBucketPolicySize + 1,
accessKey: credentials.AccessKey,
@@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting the HTTP response status to be StatusLengthRequired (411).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),

policyLen: 0,
accessKey: credentials.AccessKey,
@@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// checkBucketPolicyResources should fail.
{
bucketName: bucketName1,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),

policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: "non-existent-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),

policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: ".invalid-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),

policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 400 StatusBadRequest.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),

policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
accessKey: credentials.AccessKey,

@@ -19,6 +19,7 @@ package cmd

import (
"encoding/json"
"maps"
"net/http"
"net/url"
"strconv"
@@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
}

cloneURLValues := make(url.Values, len(r.Form))
for k, v := range r.Form {
cloneURLValues[k] = v
}
maps.Copy(cloneURLValues, r.Form)

for _, objLock := range []string{
xhttp.AmzObjectLockMode,
@@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
// Add groups claim which could be a list. This will ensure that the claim
// `jwt:groups` works.
if grpsVal, ok := claims["groups"]; ok {
if grpsIs, ok := grpsVal.([]interface{}); ok {
if grpsIs, ok := grpsVal.([]any); ok {
grps := []string{}
for _, gI := range grpsIs {
if g, ok := gI.(string); ok {

@@ -92,7 +92,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc quota clear alias/bucket' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)

@@ -21,6 +21,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
@@ -311,7 +312,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
|
||||
if len(s) == 0 {
|
||||
return
|
||||
}
|
||||
for _, p := range strings.Split(s, ",") {
|
||||
for p := range strings.SplitSeq(s, ",") {
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
@@ -735,9 +736,7 @@ type BucketReplicationResyncStatus struct {
|
||||
|
||||
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
|
||||
m = make(map[string]TargetReplicationResyncStatus)
|
||||
for arn, st := range rs.TargetsMap {
|
||||
m[arn] = st
|
||||
}
|
||||
maps.Copy(m, rs.TargetsMap)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -803,9 +804,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
|
||||
} else {
|
||||
cs, mp := getCRCMeta(objInfo, 0, nil)
|
||||
// Set object checksum.
|
||||
for k, v := range cs {
|
||||
meta[k] = v
|
||||
}
|
||||
maps.Copy(meta, cs)
|
||||
isMP = mp
|
||||
if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
|
||||
// For objects where checksum is full object, it will be the same.
|
||||
@@ -969,9 +968,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
|
||||
|
||||
t, _ := tags.ParseObjectTags(oi1.UserTags)
|
||||
oi2Map := make(map[string]string)
|
||||
for k, v := range oi2.UserTags {
|
||||
oi2Map[k] = v
|
||||
}
|
||||
maps.Copy(oi2Map, oi2.UserTags)
|
||||
if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
|
||||
return replicateMetadata
|
||||
}
|
||||
@@ -1770,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
|
||||
}
|
||||
if !copied {
|
||||
dst = make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
dst[k] = v
|
||||
}
|
||||
maps.Copy(dst, metadata)
|
||||
copied = true
|
||||
}
|
||||
delete(dst, key)
|
||||
@@ -2954,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
||||
}()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < resyncParallelRoutines; i++ {
|
||||
for i := range resyncParallelRoutines {
|
||||
wg.Add(1)
|
||||
workers[i] = make(chan ReplicateObjectInfo, 100)
|
||||
i := i
|
||||
@@ -3063,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
||||
workers[h%uint64(resyncParallelRoutines)] <- roi
|
||||
}
|
||||
}
|
||||
for i := 0; i < resyncParallelRoutines; i++ {
|
||||
for i := range resyncParallelRoutines {
|
||||
xioutil.SafeClose(workers[i])
|
||||
}
|
||||
wg.Wait()
|
||||
@@ -3193,11 +3188,9 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
|
||||
<-ctx.Done()
|
||||
return
|
||||
}
|
||||
duration := time.Duration(r.Float64() * float64(time.Minute))
|
||||
if duration < time.Second {
|
||||
duration := max(time.Duration(r.Float64()*float64(time.Minute)),
|
||||
// Make sure to sleep at least a second to avoid high CPU ticks.
|
||||
duration = time.Second
|
||||
}
|
||||
time.Second)
|
||||
time.Sleep(duration)
|
||||
}
|
||||
}
|
||||
@@ -3797,14 +3790,13 @@ func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string
|
||||
meta := make(map[string]string)
|
||||
cs, isMP = oi.decryptChecksums(partNum, h)
|
||||
for k, v := range cs {
|
||||
cksum := hash.NewChecksumString(k, v)
|
||||
if cksum == nil {
|
||||
if k == xhttp.AmzChecksumType {
|
||||
continue
|
||||
}
|
||||
if cksum.Valid() {
|
||||
meta[cksum.Type.Key()] = v
|
||||
meta[xhttp.AmzChecksumType] = cs[xhttp.AmzChecksumType]
|
||||
meta[xhttp.AmzChecksumAlgo] = cksum.Type.String()
|
||||
cktype := hash.ChecksumStringToType(k)
|
||||
if cktype.IsSet() {
|
||||
meta[cktype.Key()] = v
|
||||
meta[xhttp.AmzChecksumAlgo] = cktype.String()
|
||||
}
|
||||
}
|
||||
return meta, isMP
|
||||
|
||||
@@ -19,6 +19,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
|
||||
}
|
||||
if s.Failed.ErrCounts == nil {
|
||||
s.Failed.ErrCounts = make(map[string]int)
|
||||
for k, v := range st.Failed.ErrCounts {
|
||||
s.Failed.ErrCounts[k] = v
|
||||
}
|
||||
maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
|
||||
}
|
||||
c.Stats[arn] = &s
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"maps"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -236,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
|
||||
sys.hMutex.RLock()
|
||||
defer sys.hMutex.RUnlock()
|
||||
m := make(map[string]epHealth, len(sys.hc))
|
||||
for k, v := range sys.hc {
|
||||
m[k] = v
|
||||
}
|
||||
maps.Copy(m, sys.hc)
|
||||
return m
|
||||
}
|
||||
|
||||
|
||||
@@ -57,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
|
||||
|
||||
// callhome running on a different node.
|
||||
// sleep for some time and try again.
|
||||
duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
|
||||
if duration < time.Second {
|
||||
duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
|
||||
// Make sure to sleep at least a second to avoid high CPU ticks.
|
||||
duration = time.Second
|
||||
}
|
||||
time.Second)
|
||||
time.Sleep(duration)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -105,7 +105,7 @@ func init() {
|
||||
gob.Register(madmin.TimeInfo{})
|
||||
gob.Register(madmin.XFSErrorConfigs{})
|
||||
gob.Register(map[string]string{})
|
||||
gob.Register(map[string]interface{}{})
|
||||
gob.Register(map[string]any{})
|
||||
|
||||
// All minio-go and madmin-go API operations shall be performed only once,
|
||||
// another way to look at this is we are turning off retries.
|
||||
@@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
|
||||
|
||||
if !serverDebugLog {
|
||||
// Disable console logging if server debug log is not enabled
|
||||
noLog := func(string, ...interface{}) {}
|
||||
noLog := func(string, ...any) {}
|
||||
|
||||
consoleapi.LogInfo = noLog
|
||||
consoleapi.LogError = noLog
|
||||
@@ -761,7 +761,7 @@ func serverHandleEnvVars() {
|
||||
|
||||
domains := env.Get(config.EnvDomain, "")
|
||||
if len(domains) != 0 {
|
||||
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
|
||||
for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
|
||||
if _, ok := dns2.IsDomainName(domainName); !ok {
|
||||
logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
|
||||
"Invalid MINIO_DOMAIN value in environment variable")
|
||||
@@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return time.Time{}, false
|
||||
}
|
||||
|
||||
func (a bgCtx) Value(key interface{}) interface{} {
|
||||
func (a bgCtx) Value(key any) any {
|
||||
return a.parent.Value(key)
|
||||
}
|
||||
|
||||
@@ -43,7 +43,6 @@ func Test_readFromSecret(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
|
||||
if err != nil {
|
||||
@@ -155,7 +154,6 @@ MINIO_ROOT_PASSWORD=minio123`,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
|
||||
if err != nil {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@@ -78,12 +79,8 @@ func initHelp() {
|
||||
config.BatchSubSys: batch.DefaultKVS,
|
||||
config.BrowserSubSys: browser.DefaultKVS,
|
||||
}
|
||||
for k, v := range notify.DefaultNotificationKVS {
|
||||
kvs[k] = v
|
||||
}
|
||||
for k, v := range lambda.DefaultLambdaKVS {
|
||||
kvs[k] = v
|
||||
}
|
||||
maps.Copy(kvs, notify.DefaultNotificationKVS)
|
||||
maps.Copy(kvs, lambda.DefaultLambdaKVS)
|
||||
if globalIsErasure {
|
||||
kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
|
||||
kvs[config.HealSubSys] = heal.DefaultKVS
|
||||
@@ -355,7 +352,9 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
|
||||
}
|
||||
case config.IdentityOpenIDSubSys:
|
||||
if _, err := openid.LookupConfig(s,
|
||||
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
|
||||
xhttp.WithUserAgent(NewHTTPTransport(), func() string {
|
||||
return getUserAgent(getMinioMode())
|
||||
}), xhttp.DrainBody, globalSite.Region()); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.IdentityLDAPSubSys:
|
||||
|
||||
@@ -38,12 +38,12 @@ import (
|
||||
)
|
||||
|
||||
// Save config file to corresponding backend
|
||||
func Save(configFile string, data interface{}) error {
|
||||
func Save(configFile string, data any) error {
|
||||
return quick.SaveConfig(data, configFile, globalEtcdClient)
|
||||
}
|
||||
|
||||
// Load config from backend
|
||||
func Load(configFile string, data interface{}) (quick.Config, error) {
|
||||
func Load(configFile string, data any) (quick.Config, error) {
|
||||
return quick.LoadConfig(configFile, globalEtcdClient, data)
|
||||
}
|
||||
|
||||
|
||||
@@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
|
||||
return saveConfig(ctx, objAPI, historyFile, kv)
|
||||
}
|
||||
|
||||
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
|
||||
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
|
||||
data, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"github.com/minio/madmin-go/v3/logger/log"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/logger/target/console"
|
||||
"github.com/minio/minio/internal/logger/target/types"
|
||||
types "github.com/minio/minio/internal/logger/target/loggertypes"
|
||||
"github.com/minio/minio/internal/pubsub"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
)
|
||||
@@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
|
||||
|
||||
lastN = make([]log.Info, last)
|
||||
sys.RLock()
|
||||
sys.logBuf.Do(func(p interface{}) {
|
||||
sys.logBuf.Do(func(p any) {
|
||||
if p != nil {
|
||||
lg, ok := p.(log.Info)
|
||||
if ok && lg.SendLog(node, logKind) {
|
||||
@@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
|
||||
sys.RUnlock()
|
||||
// send last n console log messages in order filtered by node
|
||||
if cnt > 0 {
|
||||
for i := 0; i < last; i++ {
|
||||
for i := range last {
|
||||
entry := lastN[(cnt+i)%last]
|
||||
if (entry == log.Info{}) {
|
||||
continue
|
||||
@@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
|
||||
// Content returns the console stdout log
|
||||
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
|
||||
sys.RLock()
|
||||
sys.logBuf.Do(func(p interface{}) {
|
||||
sys.logBuf.Do(func(p any) {
|
||||
if p != nil {
|
||||
lg, ok := p.(log.Info)
|
||||
if ok {
|
||||
@@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
|
||||
|
||||
// Send log message 'e' to console and publish to console
|
||||
// log pubsub system
|
||||
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
|
||||
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
|
||||
var lg log.Info
|
||||
switch e := entry.(type) {
|
||||
case log.Entry:
|
||||
|
||||
@@ -198,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
|
||||
func (p *scannerMetrics) getCurrentPaths() []string {
|
||||
var res []string
|
||||
prefix := globalLocalNodeName + "/"
|
||||
p.currentPaths.Range(func(key, value interface{}) bool {
|
||||
p.currentPaths.Range(func(key, value any) bool {
|
||||
// We are a bit paranoid, but better miss an entry than crash.
|
||||
name, ok := key.(string)
|
||||
if !ok {
|
||||
@@ -221,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
|
||||
// (since this is concurrent it may not be 100% reliable)
|
||||
func (p *scannerMetrics) activeDrives() int {
|
||||
var i int
|
||||
p.currentPaths.Range(func(k, v interface{}) bool {
|
||||
p.currentPaths.Range(func(k, v any) bool {
|
||||
i++
|
||||
return true
|
||||
})
|
||||
@@ -299,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
|
||||
m.CollectedAt = time.Now()
|
||||
m.ActivePaths = p.getCurrentPaths()
|
||||
m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
|
||||
for i := scannerMetric(0); i < scannerMetricLast; i++ {
|
||||
for i := range scannerMetricLast {
|
||||
if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
|
||||
m.LifeTimeOps[i.String()] = n
|
||||
}
|
||||
@@ -309,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
|
||||
}
|
||||
|
||||
m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
|
||||
for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
|
||||
for i := range scannerMetricLastRealtime {
|
||||
lm := p.lastMinute(i)
|
||||
if lm.N > 0 {
|
||||
m.LastMinute.Actions[i.String()] = lm.asTimedAction()
|
||||
|
||||
@@ -78,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
|
||||
// Run the data scanner in a loop
|
||||
for {
|
||||
runDataScanner(ctx, objAPI)
|
||||
duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
|
||||
if duration < time.Second {
|
||||
duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
|
||||
// Make sure to sleep at least a second to avoid high CPU ticks.
|
||||
duration = time.Second
|
||||
}
|
||||
time.Second)
|
||||
time.Sleep(duration)
|
||||
}
|
||||
}()
|
||||
@@ -332,7 +330,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, c
|
||||
}
|
||||
|
||||
var skipHeal atomic.Bool
|
||||
if globalIsErasure || cache.Info.SkipHealing {
|
||||
if !globalIsErasure || cache.Info.SkipHealing {
|
||||
skipHeal.Store(true)
|
||||
}
|
||||
|
||||
|
||||
@@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
|
||||
v2 uuid-2 modTime -3m
|
||||
v1 uuid-1 modTime -4m
|
||||
*/
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
fivs[i] = FileInfo{
|
||||
Volume: bucket,
|
||||
Name: obj,
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"path"
|
||||
@@ -99,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats {
|
||||
}
|
||||
dst := *ats
|
||||
dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
|
||||
for tier, st := range ats.Tiers {
|
||||
dst.Tiers[tier] = st
|
||||
}
|
||||
maps.Copy(dst.Tiers, ats.Tiers)
|
||||
return &dst
|
||||
}
|
||||
|
||||
@@ -347,9 +346,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
|
||||
// We operate on a copy from the receiver.
|
||||
if e.Children != nil {
|
||||
ch := make(dataUsageHashMap, len(e.Children))
|
||||
for k, v := range e.Children {
|
||||
ch[k] = v
|
||||
}
|
||||
maps.Copy(ch, e.Children)
|
||||
e.Children = ch
|
||||
}
|
||||
|
||||
|
||||
@@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Changed dir must be picked up in this many cycles.
|
||||
for i := 0; i < dataUsageUpdateDirCycles; i++ {
|
||||
for range dataUsageUpdateDirCycles {
|
||||
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
|
||||
got.Info.NextCycle++
|
||||
if err != nil {
|
||||
@@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Changed dir must be picked up in this many cycles.
|
||||
for i := 0; i < dataUsageUpdateDirCycles; i++ {
|
||||
for range dataUsageUpdateDirCycles {
|
||||
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
|
||||
got.Info.NextCycle++
|
||||
if err != nil {
|
||||
@@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
|
||||
// generateUsageTestFiles create nFolders * nFiles files of size bytes each.
|
||||
func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
|
||||
pl := make([]byte, size)
|
||||
for i := 0; i < nFolders; i++ {
|
||||
for i := range nFolders {
|
||||
name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
|
||||
err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for j := 0; j < nFiles; j++ {
|
||||
for j := range nFiles {
|
||||
name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
|
||||
err = os.WriteFile(name, pl, os.ModePerm)
|
||||
if err != nil {
|
||||
@@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
|
||||
}
|
||||
|
||||
// equalAsJSON returns whether the values are equal when encoded as JSON.
|
||||
func equalAsJSON(a, b interface{}) bool {
|
||||
func equalAsJSON(a, b any) bool {
|
||||
aj, err := json.Marshal(a)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -129,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
|
||||
|
||||
if failPct > dynamicTimeoutIncreaseThresholdPct {
|
||||
// We are hitting the timeout too often, so increase the timeout by 25%
|
||||
timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
|
||||
|
||||
// Set upper cap.
|
||||
if timeout > int64(maxDynamicTimeout) {
|
||||
timeout = int64(maxDynamicTimeout)
|
||||
}
|
||||
timeout := min(
|
||||
// Set upper cap.
|
||||
atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
|
||||
// Safety, shouldn't happen
|
||||
if timeout < dt.minimum {
|
||||
timeout = dt.minimum
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogFailure()
|
||||
}
|
||||
|
||||
@@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogFailure()
|
||||
}
|
||||
|
||||
adjusted := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogFailure()
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogSuccess(20 * time.Second)
|
||||
}
|
||||
|
||||
@@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogSuccess(20 * time.Second)
|
||||
}
|
||||
|
||||
adjusted := timeout.Timeout()
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogSuccess(20 * time.Second)
|
||||
}
|
||||
|
||||
@@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
|
||||
initial := timeout.Timeout()
|
||||
|
||||
const successTimeout = 20 * time.Second
|
||||
for l := 0; l < 100; l++ {
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range 100 {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogSuccess(successTimeout)
|
||||
}
|
||||
}
|
||||
@@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
|
||||
rng := rand.New(rand.NewSource(int64(i)))
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < 100; i++ {
|
||||
for j := 0; j < 100; j++ {
|
||||
for range 100 {
|
||||
for range 100 {
|
||||
timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
|
||||
}
|
||||
to := timeout.Timeout()
|
||||
@@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
|
||||
initial := timeout.Timeout()
|
||||
|
||||
const successTimeout = 20 * time.Second
|
||||
for l := 0; l < 100; l++ {
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range 100 {
|
||||
for range dynamicTimeoutLogSize {
|
||||
timeout.LogSuccess(successTimeout)
|
||||
}
|
||||
}
|
||||
@@ -166,13 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
|
||||
func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
|
||||
const successTimeout = 20 * time.Second
|
||||
|
||||
for i := 0; i < dynamicTimeoutLogSize; i++ {
|
||||
for range dynamicTimeoutLogSize {
|
||||
rnd := f()
|
||||
duration := time.Duration(float64(successTimeout) * rnd)
|
||||
|
||||
if duration < 100*time.Millisecond {
|
||||
duration = 100 * time.Millisecond
|
||||
}
|
||||
duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
|
||||
if duration >= time.Minute {
|
||||
timeout.LogFailure()
|
||||
} else {
|
||||
@@ -188,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for try := 0; try < 10; try++ {
|
||||
for range 10 {
|
||||
testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
|
||||
}
|
||||
|
||||
@@ -205,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
|
||||
|
||||
initial := timeout.Timeout()
|
||||
|
||||
for try := 0; try < 10; try++ {
|
||||
for range 10 {
|
||||
testDynamicTimeoutAdjust(t, timeout, func() float64 {
|
||||
return 1.0 + rand.NormFloat64()
|
||||
})
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
@@ -37,7 +38,6 @@ import (
|
||||
"github.com/minio/kms-go/kes"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/etag"
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
@@ -118,10 +118,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
|
||||
names = make([]string, 0, BatchSize)
|
||||
)
|
||||
for len(objects) > 0 {
|
||||
N := BatchSize
|
||||
if len(objects) < BatchSize {
|
||||
N = len(objects)
|
||||
}
|
||||
N := min(len(objects), BatchSize)
|
||||
batch := objects[:N]
|
||||
|
||||
// We have to decrypt only ETags of SSE-S3 single-part
|
||||
@@ -293,7 +290,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
|
||||
return err
|
||||
}
|
||||
sealedKey = objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
|
||||
crypto.S3.CreateMetadata(metadata, newKey, sealedKey)
|
||||
crypto.S3.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey)
|
||||
return nil
|
||||
case crypto.S3KMS:
|
||||
if GlobalKMS == nil {
|
||||
@@ -318,9 +315,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
|
||||
// of the client provided context and add the bucket
|
||||
// key, if not present.
|
||||
kmsCtx := kms.Context{}
|
||||
for k, v := range cryptoCtx {
|
||||
kmsCtx[k] = v
|
||||
}
|
||||
maps.Copy(kmsCtx, cryptoCtx)
|
||||
if _, ok := kmsCtx[bucket]; !ok {
|
||||
kmsCtx[bucket] = path.Join(bucket, object)
|
||||
}
|
||||
@@ -333,7 +328,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
|
||||
}
|
||||
|
||||
sealedKey := objectKey.Seal(newKey.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
|
||||
crypto.S3KMS.CreateMetadata(metadata, newKey, sealedKey, cryptoCtx)
|
||||
crypto.S3KMS.CreateMetadata(metadata, newKey.KeyID, newKey.Ciphertext, sealedKey, cryptoCtx)
|
||||
return nil
|
||||
case crypto.SSEC:
|
||||
sealedKey, err := crypto.SSEC.ParseMetadata(metadata)
|
||||
@@ -376,7 +371,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
|
||||
|
||||
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
|
||||
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
|
||||
crypto.S3.CreateMetadata(metadata, key, sealedKey)
|
||||
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
|
||||
return objectKey, nil
|
||||
case crypto.S3KMS:
|
||||
if GlobalKMS == nil {
|
||||
@@ -390,9 +385,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
|
||||
// of the client provided context and add the bucket
|
||||
// key, if not present.
|
||||
kmsCtx := kms.Context{}
|
||||
for k, v := range cryptoCtx {
|
||||
kmsCtx[k] = v
|
||||
}
|
||||
maps.Copy(kmsCtx, cryptoCtx)
|
||||
if _, ok := kmsCtx[bucket]; !ok {
|
||||
kmsCtx[bucket] = path.Join(bucket, object)
|
||||
}
|
||||
@@ -409,7 +402,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
|
||||
|
||||
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
|
||||
sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3KMS.String(), bucket, object)
|
||||
crypto.S3KMS.CreateMetadata(metadata, key, sealedKey, cryptoCtx)
|
||||
crypto.S3KMS.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey, cryptoCtx)
|
||||
return objectKey, nil
|
||||
case crypto.SSEC:
|
||||
objectKey := crypto.GenerateKey(key, rand.Reader)
|
||||
@@ -427,7 +420,7 @@ func newEncryptReader(ctx context.Context, content io.Reader, kind crypto.Type,
|
||||
return nil, crypto.ObjectKey{}, err
|
||||
}
|
||||
|
||||
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
|
||||
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
|
||||
if err != nil {
|
||||
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
|
||||
}
|
||||
@@ -570,7 +563,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
|
||||
reader, err := sio.DecryptReader(client, sio.Config{
|
||||
Key: objectEncryptionKey,
|
||||
SequenceNumber: seqNumber,
|
||||
CipherSuites: fips.DARECiphers(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, crypto.ErrInvalidCustomerKey
|
||||
@@ -1062,7 +1054,7 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
|
||||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte(baseKey))
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key"))
|
||||
}
|
||||
return buffer.Bytes()
|
||||
@@ -1076,8 +1068,16 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
|
||||
return input, nil
|
||||
}
|
||||
var key []byte
|
||||
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
|
||||
key = k[:]
|
||||
if crypto.SSECopy.IsRequested(h) {
|
||||
sseCopyKey, err := crypto.SSECopy.ParseHTTP(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = sseCopyKey[:]
|
||||
} else {
|
||||
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
|
||||
key = k[:]
|
||||
}
|
||||
}
|
||||
key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
|
||||
if err != nil {
|
||||
@@ -1085,11 +1085,12 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
|
||||
}
|
||||
mac := hmac.New(sha256.New, key)
|
||||
mac.Write([]byte(baseKey))
|
||||
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
|
||||
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil)})
|
||||
}
|
||||
}
|
||||
|
||||
// decryptPartsChecksums will attempt to decode checksums and return it/them if set.
|
||||
// decryptPartsChecksums will attempt to decrypt and decode part checksums, and save
|
||||
// only the decrypted part checksum values on ObjectInfo directly.
|
||||
// if part > 0, and we have the checksum for the part that will be returned.
|
||||
func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
|
||||
data := o.Checksum
|
||||
@@ -1114,6 +1115,23 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
|
||||
}
|
||||
}
|
||||
|
||||
// decryptChecksum will attempt to decrypt the ObjectInfo.Checksum, returns the decrypted value
|
||||
// An error is only returned if it was encrypted and the decryption failed.
|
||||
func (o *ObjectInfo) decryptChecksum(h http.Header) ([]byte, error) {
|
||||
data := o.Checksum
|
||||
if len(data) == 0 {
|
||||
return data, nil
|
||||
}
|
||||
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
|
||||
decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = decrypted
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// metadataEncryptFn provides an encryption function for metadata.
|
||||
// Will return nil, nil if unencrypted.
|
||||
func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn, error) {
|
||||
|
||||
@@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
// Simple useful utilities
|
||||
repeat = func(k int64, n int) []int64 {
|
||||
a := []int64{}
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
a = append(a, k)
|
||||
}
|
||||
return a
|
||||
@@ -471,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) {
|
||||
// round up the lbPartOffset
|
||||
// to the end of the
|
||||
// corresponding DARE package
|
||||
lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
|
||||
if lbPkgEndOffset > v {
|
||||
lbPkgEndOffset = v
|
||||
}
|
||||
lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
|
||||
bytesToDrop := v - lbPkgEndOffset
|
||||
|
||||
// Last segment to update `l`
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"runtime"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
@@ -122,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
|
||||
// eyes that we prefer a sorted setCount slice for the
|
||||
// subsequent function to figure out the right common
|
||||
// divisor, it avoids loops.
|
||||
sort.Slice(setCounts, func(i, j int) bool {
|
||||
return setCounts[i] < setCounts[j]
|
||||
})
|
||||
slices.Sort(setCounts)
|
||||
|
||||
return setCounts
|
||||
}
|
||||
|
||||
@@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
srvCtxt := serverCtxt{}
|
||||
err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
|
||||
@@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
gotGCD := getDivisibleSize(testCase.totalSizes)
|
||||
if testCase.result != gotGCD {
|
||||
@@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
|
||||
for i, arg := range testCase.args {
|
||||
@@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
|
||||
for i, arg := range testCase.args {
|
||||
@@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
gotEs, err := parseEndpointSet(0, testCase.arg)
|
||||
if err != nil && testCase.success {
|
||||
|
||||
@@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
i := i
|
||||
testCase := testCase
|
||||
t.Run("", func(t *testing.T) {
|
||||
var srvCtxt serverCtxt
|
||||
|
||||
@@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
|
||||
shardSize := e.ShardSize()
|
||||
shardFileSize := e.ShardFileSize(totalLength)
|
||||
endShard := (startOffset + length) / e.blockSize
|
||||
tillOffset := endShard*shardSize + shardSize
|
||||
if tillOffset > shardFileSize {
|
||||
tillOffset = shardFileSize
|
||||
}
|
||||
tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
|
||||
return tillOffset
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
|
||||
var mu sync.Mutex
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, i := range r.Perm(len(disks)) {
|
||||
i := i
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
// Verify erasure.Decode() for random offsets and lengths.
|
||||
for i := 0; i < iterations; i++ {
|
||||
for range iterations {
|
||||
offset := r.Int63n(length)
|
||||
readLen := r.Int63n(length - offset)
|
||||
|
||||
@@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
|
||||
b.Fatalf("failed to create erasure test file: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < dataDown; i++ {
|
||||
for i := range dataDown {
|
||||
writers[i] = nil
|
||||
}
|
||||
for i := data; i < data+parityDown; i++ {
|
||||
writers[i] = nil
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(size)
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
bitrotReaders := make([]io.ReaderAt, len(disks))
|
||||
for index, disk := range disks {
|
||||
if writers[index] == nil {
|
||||
|
||||
@@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
|
||||
buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
|
||||
content := make([]byte, size)
|
||||
|
||||
for i := 0; i < dataDown; i++ {
|
||||
for i := range dataDown {
|
||||
disks[i] = OfflineDisk
|
||||
}
|
||||
for i := data; i < data+parityDown; i++ {
|
||||
disks[i] = OfflineDisk
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(size)
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
writers := make([]io.Writer, len(disks))
|
||||
for i, disk := range disks {
|
||||
if disk == OfflineDisk {
|
||||
|
||||
@@ -102,7 +102,7 @@ func TestErasureHeal(t *testing.T) {
|
||||
// setup stale disks for the test case
|
||||
staleDisks := make([]StorageAPI, len(disks))
|
||||
copy(staleDisks, disks)
|
||||
for j := 0; j < len(staleDisks); j++ {
|
||||
for j := range staleDisks {
|
||||
if j < test.offDisks {
|
||||
readers[j] = nil
|
||||
} else {
|
||||
|
||||
@@ -175,7 +175,7 @@ func TestListOnlineDisks(t *testing.T) {
|
||||
fourNanoSecs := time.Unix(4, 0).UTC()
|
||||
modTimesThreeNone := make([]time.Time, 16)
|
||||
modTimesThreeFour := make([]time.Time, 16)
|
||||
for i := 0; i < 16; i++ {
|
||||
for i := range 16 {
|
||||
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
|
||||
// to be tampered with.
|
||||
if i > 12 {
|
||||
@@ -244,7 +244,6 @@ func TestListOnlineDisks(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
|
||||
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
|
||||
if err != nil {
|
||||
@@ -350,7 +349,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
|
||||
fourNanoSecs := time.Unix(4, 0).UTC()
|
||||
modTimesThreeNone := make([]time.Time, 16)
|
||||
modTimesThreeFour := make([]time.Time, 16)
|
||||
for i := 0; i < 16; i++ {
|
||||
for i := range 16 {
|
||||
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
|
||||
// to be tampered with.
|
||||
if i > 12 {
|
||||
@@ -419,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
test := test
|
||||
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
|
||||
_, err := obj.PutObject(ctx, bucket, object,
|
||||
mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
|
||||
@@ -753,7 +751,7 @@ func TestCommonParities(t *testing.T) {
|
||||
}
|
||||
for idx, test := range tests {
|
||||
var metaArr []FileInfo
|
||||
for i := 0; i < 12; i++ {
|
||||
for i := range 12 {
|
||||
fi := test.fi1
|
||||
if i%2 == 0 {
|
||||
fi = test.fi2
|
||||
|
||||
@@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
|
||||
func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
|
||||
g := errgroup.WithNErrs(len(storageDisks))
|
||||
for index := range storageDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if storageDisks[index] == nil {
|
||||
// we ignore disk not found errors
|
||||
|
||||
@@ -296,7 +296,6 @@ func TestIsObjectDangling(t *testing.T) {
|
||||
// Add new cases as seen
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs)
|
||||
if !gotMeta.Equals(testCase.expectedMeta) {
|
||||
|
||||
@@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
// Read `xl.meta` in parallel across disks.
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() (err error) {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) {
|
||||
// of errors into a single maximal error with in the list.
|
||||
func TestReduceErrs(t *testing.T) {
|
||||
canceledErrs := make([]error, 0, 5)
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled))
|
||||
}
|
||||
// List all of all test cases to validate various cases of reduce errors.
|
||||
@@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) {
|
||||
var tmp [16]byte
|
||||
rng.Read(tmp[:])
|
||||
prefix := hex.EncodeToString(tmp[:])
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
rng.Read(tmp[:])
|
||||
|
||||
y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x)
|
||||
|
||||
@@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc
|
||||
|
||||
// Start writing `xl.meta` to all disks in parallel.
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
|
||||
@@ -189,7 +189,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
|
||||
commonNumVersions := 2
|
||||
numVersionsInQuorum := make([]int, 16)
|
||||
numVersionsNoQuorum := make([]int, 16)
|
||||
for i := 0; i < 16; i++ {
|
||||
for i := range 16 {
|
||||
if i < 4 {
|
||||
continue
|
||||
}
|
||||
@@ -269,7 +269,6 @@ func TestFindFileInfoInQuorum(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run("", func(t *testing.T) {
|
||||
fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
|
||||
_, ok1 := err.(InsufficientReadQuorum)
|
||||
@@ -316,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) {
|
||||
}
|
||||
|
||||
var i uint
|
||||
for i = 0; i < 8; i++ {
|
||||
for i = range uint(8) {
|
||||
fi := FileInfo{
|
||||
TransitionTier: inputs[0].tier,
|
||||
TransitionedObjName: inputs[0].remoteObjName,
|
||||
|
||||
@@ -322,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
|
||||
uploads = append(uploads, MultipartInfo{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))),
|
||||
UploadID: base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)),
|
||||
Initiated: startTime,
|
||||
})
|
||||
populatedUploadIDs.Add(uploadID)
|
||||
@@ -498,7 +498,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
|
||||
partsMetadata[index].Metadata = userDefined
|
||||
}
|
||||
uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano())
|
||||
uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID)))
|
||||
uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID))
|
||||
uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID)
|
||||
|
||||
// Write updated `xl.meta` to all disks.
|
||||
@@ -540,7 +540,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src
|
||||
|
||||
// Rename file on all underlying storage disks.
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -820,7 +819,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI
|
||||
objectParts := make([][]string, len(onlineDisks))
|
||||
// List uploaded parts from drives.
|
||||
for index := range onlineDisks {
|
||||
index := index
|
||||
g.Go(func() (err error) {
|
||||
if onlineDisks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -995,7 +993,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
|
||||
objectPartInfos := make([][]*ObjectPartInfo, len(disks))
|
||||
// Rename file on all underlying storage disks.
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() (err error) {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -1161,6 +1158,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
Err: fmt.Errorf("checksum type mismatch. got %q (%s) expected %q (%s)", checksumType.String(), checksumType.ObjType(), opts.WantChecksum.Type.String(), opts.WantChecksum.Type.ObjType()),
|
||||
}
|
||||
}
|
||||
checksumType |= hash.ChecksumMultipart | hash.ChecksumIncludesMultipart
|
||||
}
|
||||
|
||||
var checksumCombined []byte
|
||||
@@ -1481,7 +1479,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
@@ -1509,17 +1507,10 @@ func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, objec
 		auditObjectErasureSet(ctx, "AbortMultipartUpload", object, &er)
 	}

-	// Validates if upload ID exists.
-	if _, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false); err != nil {
-		if errors.Is(err, errVolumeNotFound) {
-			return toObjectErr(err, bucket)
-		}
-		return toObjectErr(err, bucket, object, uploadID)
-	}
-
 	// Cleanup all uploaded parts.
-	er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID))
-
-	// Successfully purged.
-	return nil
+	defer er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID))
+
+	// Validates if upload ID exists.
+	_, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false)
+	return toObjectErr(err, bucket, object, uploadID)
 }
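The rewritten abort path schedules the part cleanup with `defer` before validating the upload ID, so the purge runs on every return path and a single error conversion covers both outcomes. A simplified sketch of that control-flow shape, using placeholder types rather than the MinIO implementations:

```go
package main

import (
	"errors"
	"fmt"
)

var errUploadNotFound = errors.New("upload id not found") // stand-in error

// abort mirrors the simplified flow: register cleanup first, then validate
// and return whatever error the validation produced (nil on success).
func abort(uploadID string, existing map[string]bool) error {
	defer func() {
		// Cleanup runs whether or not the upload ID was valid.
		delete(existing, uploadID)
		fmt.Println("cleaned up", uploadID)
	}()

	if !existing[uploadID] {
		return errUploadNotFound
	}
	return nil
}

func main() {
	uploads := map[string]bool{"abc": true}
	fmt.Println(abort("abc", uploads)) // <nil>, parts removed
	fmt.Println(abort("xyz", uploads)) // error, cleanup still runs
}
```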
@@ -24,6 +24,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"net/http"
|
||||
"path"
|
||||
"runtime"
|
||||
@@ -504,7 +505,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
|
||||
|
||||
// count the number of offline disks
|
||||
offline := 0
|
||||
for i := 0; i < len(errs); i++ {
|
||||
for i := range len(errs) {
|
||||
var found bool
|
||||
switch {
|
||||
case errors.Is(errs[i], errDiskNotFound):
|
||||
@@ -542,7 +543,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
|
||||
disks := er.getDisks()
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -575,7 +575,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
|
||||
rawFileInfos := make([]RawFileInfo, len(disks))
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() (err error) {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -828,6 +827,13 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 		minDisks = er.setDriveCount - er.defaultParityCount
 	}

+	if minDisks == er.setDriveCount/2 {
+		// when data and parity are same we must atleast
+		// wait for response from 1 extra drive to avoid
+		// split-brain.
+		minDisks++
+	}
+
 	calcQuorum := func(metaArr []FileInfo, errs []error) (FileInfo, []FileInfo, []StorageAPI, time.Time, string, error) {
 		readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
 		if err != nil {
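The added guard matters when data and parity shards are equal: with N drives and parity N/2, exactly N/2 responses could come from either half of a partitioned set, so the read waits for one extra drive. A small arithmetic sketch of that rule (a standalone illustration, not the MinIO function itself):

```go
package main

import "fmt"

// minResponses mirrors the guard: start from the data-shard count and bump
// by one when data == parity, so a bare half of the set can never answer alone.
func minResponses(setDriveCount, parityCount int) int {
	minDisks := setDriveCount - parityCount
	if minDisks == setDriveCount/2 {
		minDisks++ // avoid split-brain when the set is evenly divided
	}
	return minDisks
}

func main() {
	fmt.Println(minResponses(12, 4)) // 8 — parity < half, unchanged
	fmt.Println(minResponses(12, 6)) // 7 — parity == half, one extra drive required
	fmt.Println(minResponses(16, 8)) // 9
}
```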
@@ -1022,7 +1028,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
|
||||
dataDirs := make([]string, len(disks))
|
||||
// Rename file on all underlying storage disks.
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -1221,7 +1226,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
|
||||
partsMetadata[index].SetInlineData()
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
@@ -1272,6 +1277,11 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
// if object doesn't exist and not a replication request return error for conditional requests
|
||||
if err != nil && !opts.ReplicationRequest {
|
||||
return objInfo, err
|
||||
}
|
||||
}
|
||||
|
||||
// Validate input data size and it can never be less than -1.
|
||||
@@ -1470,7 +1480,17 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
actualSize = n
|
||||
}
|
||||
}
|
||||
if fi.Checksum == nil {
|
||||
// If ServerSideChecksum is wanted for this object, it takes precedence
|
||||
// over opts.WantChecksum.
|
||||
if opts.WantServerSideChecksumType.IsSet() {
|
||||
serverSideChecksum := r.RawServerSideChecksumResult()
|
||||
if serverSideChecksum != nil {
|
||||
fi.Checksum = serverSideChecksum.AppendTo(nil, nil)
|
||||
if opts.EncryptFn != nil {
|
||||
fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum)
|
||||
}
|
||||
}
|
||||
} else if fi.Checksum == nil && opts.WantChecksum != nil {
|
||||
// Trailing headers checksums should now be filled.
|
||||
fi.Checksum = opts.WantChecksum.AppendTo(nil, nil)
|
||||
if opts.EncryptFn != nil {
|
||||
@@ -1557,7 +1577,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
@@ -1574,7 +1594,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
if len(versions) == 0 {
|
||||
// Whether a disk was initially or becomes offline
|
||||
// during this upload, send it to the MRF list.
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
continue
|
||||
}
|
||||
@@ -1614,7 +1634,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
|
||||
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -1819,7 +1838,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object
|
||||
}
|
||||
g := errgroup.WithNErrs(len(onlineDisks))
|
||||
for index := range onlineDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if onlineDisks[index] == nil {
|
||||
return nil
|
||||
@@ -1845,7 +1863,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string
|
||||
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if disks[index] == nil {
|
||||
return nil
|
||||
@@ -2205,9 +2222,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
 			return ObjectInfo{}, err
 		}
 	}
-	for k, v := range objInfo.UserDefined {
-		fi.Metadata[k] = v
-	}
+	maps.Copy(fi.Metadata, objInfo.UserDefined)
 	fi.ModTime = opts.MTime
 	fi.VersionID = opts.VersionID

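`maps.Copy` from the standard library `maps` package (Go 1.21) replaces the hand-written copy loops here and in the IAM paths further down; it copies every key/value pair from the source map into the destination, overwriting keys that already exist. A minimal sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	metadata := map[string]string{"content-type": "text/plain", "owner": "a"}
	userDefined := map[string]string{"owner": "b", "x-amz-meta-tag": "demo"}

	// Equivalent to: for k, v := range userDefined { metadata[k] = v }
	maps.Copy(metadata, userDefined)

	fmt.Println(metadata["owner"])          // b — existing key overwritten
	fmt.Println(metadata["x-amz-meta-tag"]) // demo — new key added
}
```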
@@ -2277,9 +2292,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
|
||||
|
||||
fi.Metadata[xhttp.AmzObjectTagging] = tags
|
||||
fi.ReplicationState = opts.PutReplicationState()
|
||||
for k, v := range opts.UserDefined {
|
||||
fi.Metadata[k] = v
|
||||
}
|
||||
maps.Copy(fi.Metadata, opts.UserDefined)
|
||||
|
||||
if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
@@ -2297,7 +2310,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o
|
||||
|
||||
// Start writing `xl.meta` to all disks in parallel.
|
||||
for index := range onlineDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if onlineDisks[index] == nil {
|
||||
return errDiskNotFound
|
||||
|
||||
@@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
|
||||
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
|
||||
}
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run("", func(t *testing.T) {
|
||||
_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
|
||||
if err != nil {
|
||||
@@ -625,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for f := 0; f < 2; f++ {
|
||||
for f := range 2 {
|
||||
diskErrors := make(map[int]error)
|
||||
for i := 0; i <= f; i++ {
|
||||
diskErrors[i] = nil
|
||||
@@ -774,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
|
||||
// in a 16 disk Erasure setup. The original disks are 'replaced' with
|
||||
// naughtyDisks that fail after 'f' successful StorageAPI method
|
||||
// invocations, where f - [0,4)
|
||||
for f := 0; f < 2; f++ {
|
||||
for f := range 2 {
|
||||
diskErrors := make(map[int]error)
|
||||
for i := 0; i <= f; i++ {
|
||||
diskErrors[i] = nil
|
||||
@@ -837,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
|
||||
// in a 16 disk Erasure setup. The original disks are 'replaced' with
|
||||
// naughtyDisks that fail after 'f' successful StorageAPI method
|
||||
// invocations, where f - [0,2)
|
||||
for f := 0; f < 2; f++ {
|
||||
for f := range 2 {
|
||||
t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
|
||||
diskErrors := make(map[int]error)
|
||||
for i := 0; i <= f; i++ {
|
||||
@@ -1109,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
{parts7, errs7, 11, 11, parts7SC, nil},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.(*testing.T).Run("", func(t *testing.T) {
|
||||
globalStorageClass.Update(tt.storageClassCfg)
|
||||
actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks)))
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -117,12 +118,7 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool {
 }

 func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool {
-	for _, b := range pd.DecommissionedBuckets {
-		if b == bucket {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(pd.DecommissionedBuckets, bucket)
 }

 func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) {
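`slices.Contains` (Go 1.21) collapses the membership loop into one call: it reports whether a value is present in a slice of comparable elements. A standalone sketch of the same shape:

```go
package main

import (
	"fmt"
	"slices"
)

// isBucketDecommissioned is an illustrative stand-in with the same logic:
// one call replaces the explicit for/if/return-true loop.
func isBucketDecommissioned(decommissioned []string, bucket string) bool {
	return slices.Contains(decommissioned, bucket)
}

func main() {
	done := []string{"photos", "logs"}
	fmt.Println(isBucketDecommissioned(done, "logs"))    // true
	fmt.Println(isBucketDecommissioned(done, "backups")) // false
}
```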
@@ -792,8 +788,6 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
|
||||
}
|
||||
|
||||
for setIdx, set := range pool.sets {
|
||||
set := set
|
||||
|
||||
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
|
||||
if lc == nil {
|
||||
return false
|
||||
@@ -901,7 +895,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
|
||||
}
|
||||
|
||||
// gr.Close() is ensured by decommissionObject().
|
||||
for try := 0; try < 3; try++ {
|
||||
for range 3 {
|
||||
if version.IsRemote() {
|
||||
if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{
|
||||
VersionID: versionID,
|
||||
|
||||
@@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) {
|
||||
|
||||
t.Parallel()
|
||||
for _, testCase := range testCases {
|
||||
testCase := testCase
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
update, err := testCase.meta.validate(testCase.pools)
|
||||
if testCase.expectedErr {
|
||||
|
||||
@@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int {
|
||||
if z.rebalMeta == nil {
|
||||
return 0
|
||||
}
|
||||
for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
|
||||
for i := range len(z.rebalMeta.PoolStats) {
|
||||
if i == index {
|
||||
return index
|
||||
}
|
||||
@@ -341,7 +341,8 @@ func (r *rebalanceMeta) save(ctx context.Context, store objectIO) error {
|
||||
return r.saveWithOpts(ctx, store, ObjectOptions{})
|
||||
}
|
||||
|
||||
func (z *erasureServerPools) IsRebalanceStarted() bool {
|
||||
func (z *erasureServerPools) IsRebalanceStarted(ctx context.Context) bool {
|
||||
_ = z.loadRebalanceMeta(ctx)
|
||||
z.rebalMu.RLock()
|
||||
defer z.rebalMu.RUnlock()
|
||||
|
||||
@@ -394,12 +395,14 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
|
||||
var (
|
||||
quit bool
|
||||
traceMsg string
|
||||
notify bool // if status changed, notify nodes to reload rebalance metadata
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case rebalErr := <-doneCh:
|
||||
quit = true
|
||||
notify = true
|
||||
now := time.Now()
|
||||
var status rebalStatus
|
||||
|
||||
@@ -421,12 +424,16 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
|
||||
z.rebalMu.Unlock()
|
||||
|
||||
case <-timer.C:
|
||||
notify = false
|
||||
traceMsg = fmt.Sprintf("saved at %s", time.Now())
|
||||
}
|
||||
|
||||
stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
|
||||
err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats)
|
||||
stopFn(0, err)
|
||||
if err == nil && notify {
|
||||
globalNotificationSys.LoadRebalanceMeta(GlobalContext, false)
|
||||
}
|
||||
rebalanceLogIf(GlobalContext, err)
|
||||
|
||||
if quit {
|
||||
@@ -580,8 +587,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
|
||||
}
|
||||
|
||||
for setIdx, set := range pool.sets {
|
||||
set := set
|
||||
|
||||
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
|
||||
if lc == nil {
|
||||
return false
|
||||
@@ -594,7 +599,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
|
||||
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -689,7 +693,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
|
||||
continue
|
||||
}
|
||||
|
||||
for try := 0; try < 3; try++ {
|
||||
for range 3 {
|
||||
// GetObjectReader.Close is called by rebalanceObject
|
||||
gr, err := set.GetObjectNInfo(ctx,
|
||||
bucket,
|
||||
@@ -803,13 +807,20 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int
 	ctx = lkCtx.Context()
 	noLockOpts := ObjectOptions{NoLock: true}
 	r := &rebalanceMeta{}
-	if err := r.loadWithOpts(ctx, z.serverPools[0], noLockOpts); err != nil {
+	err = r.loadWithOpts(ctx, z.serverPools[0], noLockOpts)
+	if err != nil && !errors.Is(err, errConfigNotFound) {
 		return err
 	}

 	z.rebalMu.Lock()
 	defer z.rebalMu.Unlock()

+	// if not found, we store the memory metadata back
+	// when rebalance status changed, will notify all nodes update status to memory, we can treat the memory metadata is the latest status
+	if errors.Is(err, errConfigNotFound) {
+		r = z.rebalMeta
+	}
+
 	switch opts {
 	case rebalSaveStoppedAt:
 		r.StoppedAt = time.Now()
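The reworked save path tolerates a missing on-disk rebalance config: `errors.Is(err, errConfigNotFound)` is no longer fatal, and the copy already held in memory is used as the source of truth in that case. A generic load-or-fallback sketch of the same pattern (the sentinel error and the `meta` type below are stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

var errConfigNotFound = errors.New("config not found") // stand-in sentinel

type meta struct{ Status string }

// loadOrFallback tries persisted state first and falls back to the in-memory
// copy when nothing has been written yet; real failures still propagate.
func loadOrFallback(load func() (*meta, error), inMemory *meta) (*meta, error) {
	m, err := load()
	if err != nil && !errors.Is(err, errConfigNotFound) {
		return nil, err
	}
	if errors.Is(err, errConfigNotFound) {
		return inMemory, nil // first save: persist what we already know
	}
	return m, nil
}

func main() {
	mem := &meta{Status: "started"}
	got, _ := loadOrFallback(func() (*meta, error) { return nil, errConfigNotFound }, mem)
	fmt.Println(got.Status) // started
}
```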
@@ -420,7 +420,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
|
||||
nSets := make([]int, len(z.serverPools))
|
||||
g := errgroup.WithNErrs(len(z.serverPools))
|
||||
for index := range z.serverPools {
|
||||
index := index
|
||||
// Skip suspended pools or pools participating in rebalance for any new
|
||||
// I/O.
|
||||
if z.IsSuspended(index) || z.IsPoolRebalancing(index) {
|
||||
@@ -635,15 +634,18 @@ func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, objec
|
||||
// if none are found falls back to most available space pool, this function is
|
||||
// designed to be only used by PutObject, CopyObject (newObject creation) and NewMultipartUpload.
|
||||
func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
|
||||
idx, err = z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
|
||||
pinfo, _, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, ObjectOptions{
|
||||
SkipDecommissioned: true,
|
||||
SkipRebalancing: true,
|
||||
})
|
||||
|
||||
if err != nil && !isErrObjectNotFound(err) {
|
||||
return idx, err
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if isErrObjectNotFound(err) {
|
||||
idx = pinfo.Index
|
||||
if isErrObjectNotFound(err) || pinfo.Err == nil {
|
||||
// will generate a temp object
|
||||
idx = z.getAvailablePoolIdx(ctx, bucket, object, size)
|
||||
if idx < 0 {
|
||||
return -1, toObjectErr(errDiskFull)
|
||||
@@ -657,7 +659,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
|
||||
g := errgroup.WithNErrs(len(z.serverPools))
|
||||
|
||||
for index := range z.serverPools {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
return z.serverPools[index].Shutdown(ctx)
|
||||
}, index)
|
||||
@@ -709,7 +710,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool)
|
||||
storageInfos := make([]StorageInfo, len(z.serverPools))
|
||||
g := errgroup.WithNErrs(len(z.serverPools))
|
||||
for index := range z.serverPools {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics)
|
||||
return nil
|
||||
@@ -1089,6 +1089,10 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
|
||||
|
||||
object = encodeDirObject(object)
|
||||
if z.SinglePool() {
|
||||
_, err := z.getPoolIdx(ctx, bucket, object, data.Size())
|
||||
if err != nil {
|
||||
return ObjectInfo{}, err
|
||||
}
|
||||
return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
|
||||
}
|
||||
|
||||
@@ -1178,6 +1182,13 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
|
||||
return z.deleteObjectFromAllPools(ctx, bucket, object, opts, noReadQuorumPools)
|
||||
}
|
||||
|
||||
// All replication requests needs to go to pool with the object.
|
||||
if opts.ReplicationRequest {
|
||||
objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts)
|
||||
objInfo.Name = decodeDirObject(object)
|
||||
return objInfo, err
|
||||
}
|
||||
|
||||
for _, pool := range z.serverPools {
|
||||
objInfo, err := pool.DeleteObject(ctx, bucket, object, opts)
|
||||
if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
|
||||
@@ -1254,7 +1265,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
|
||||
|
||||
eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools))
|
||||
for i, pool := range z.serverPools {
|
||||
i := i
|
||||
pool := pool
|
||||
eg.Go(func() error {
|
||||
dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts)
|
||||
@@ -1340,12 +1350,15 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
|
||||
}
|
||||
|
||||
putOpts := ObjectOptions{
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
NoLock: true,
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
NoLock: true,
|
||||
EncryptFn: dstOpts.EncryptFn,
|
||||
WantChecksum: dstOpts.WantChecksum,
|
||||
WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
|
||||
}
|
||||
|
||||
return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
|
||||
@@ -1530,10 +1543,8 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre
|
||||
}
|
||||
|
||||
if loi.IsTruncated && merged.lastSkippedEntry > loi.NextMarker {
|
||||
// An object hidden by ILM was found during a truncated listing. Since the number of entries
|
||||
// fetched from drives is limited by max-keys, we should use the last ILM filtered entry
|
||||
// as a continuation token if it is lexially higher than the last visible object so that the
|
||||
// next call of WalkDir() with the max-keys can reach new objects not seen previously.
|
||||
// An object hidden by ILM was found during a truncated listing. Set the next marker
|
||||
// as the last skipped entry if it is lexically higher loi.NextMarker as an optimization
|
||||
loi.NextMarker = merged.lastSkippedEntry
|
||||
}
|
||||
|
||||
@@ -1711,7 +1722,9 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
|
||||
}
|
||||
|
||||
z.mpCache.Range(func(_ string, mp MultipartInfo) bool {
|
||||
poolResult.Uploads = append(poolResult.Uploads, mp)
|
||||
if mp.Bucket == bucket {
|
||||
poolResult.Uploads = append(poolResult.Uploads, mp)
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(poolResult.Uploads, func(i int, j int) bool {
|
||||
@@ -1813,6 +1826,10 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
|
||||
}
|
||||
|
||||
if z.SinglePool() {
|
||||
_, err := z.getPoolIdx(ctx, bucket, object, data.Size())
|
||||
if err != nil {
|
||||
return PartInfo{}, err
|
||||
}
|
||||
return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
|
||||
}
|
||||
|
||||
@@ -2223,7 +2240,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
|
||||
|
||||
for poolIdx, erasureSet := range z.serverPools {
|
||||
for setIdx, set := range erasureSet.sets {
|
||||
set := set
|
||||
listOut := make(chan metaCacheEntry, 1)
|
||||
entries = append(entries, listOut)
|
||||
disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
|
||||
|
||||
@@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
|
||||
s.erasureDisksMu.RLock()
|
||||
defer s.erasureDisksMu.RUnlock()
|
||||
|
||||
for i := 0; i < s.setCount; i++ {
|
||||
for i := range s.setCount {
|
||||
for j := 0; j < s.setDriveCount; j++ {
|
||||
disk := s.erasureDisks[i][j]
|
||||
if disk == OfflineDisk {
|
||||
@@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
|
||||
if diskID == offlineDiskUUID {
|
||||
return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
|
||||
}
|
||||
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
|
||||
for i := range len(refFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
|
||||
if refFormat.Erasure.Sets[i][j] == diskID {
|
||||
return i, j, nil
|
||||
@@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
|
||||
return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
|
||||
}
|
||||
|
||||
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
|
||||
for i := range len(refFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
|
||||
if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
|
||||
return i, j, nil
|
||||
@@ -377,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
||||
|
||||
mutex := newNSLock(globalIsDistErasure)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
|
||||
}
|
||||
|
||||
@@ -390,9 +390,9 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
||||
|
||||
var wg sync.WaitGroup
|
||||
var lk sync.Mutex
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
lockerEpSet := set.NewStringSet()
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for j := range setDriveCount {
|
||||
wg.Add(1)
|
||||
go func(i int, endpoint Endpoint) {
|
||||
defer wg.Done()
|
||||
@@ -409,13 +409,13 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
|
||||
var innerWg sync.WaitGroup
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for j := range setDriveCount {
|
||||
disk := storageDisks[i*setDriveCount+j]
|
||||
if disk == nil {
|
||||
continue
|
||||
@@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {
|
||||
|
||||
g := errgroup.WithNErrs(len(s.sets))
|
||||
for index := range s.sets {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
storageInfos[index] = s.sets[index].StorageInfo(ctx)
|
||||
return nil
|
||||
@@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag
|
||||
|
||||
g := errgroup.WithNErrs(len(s.sets))
|
||||
for index := range s.sets {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics)
|
||||
return nil
|
||||
@@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
|
||||
g := errgroup.WithNErrs(len(s.sets))
|
||||
|
||||
for index := range s.sets {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
return s.sets[index].Shutdown(ctx)
|
||||
}, index)
|
||||
@@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
|
||||
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
|
||||
g := errgroup.WithNErrs(len(storageDisks))
|
||||
for index := range storageDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if storageDisks[index] == nil {
|
||||
// we ignore disk not found errors
|
||||
@@ -868,11 +864,14 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
|
||||
}
|
||||
|
||||
putOpts := ObjectOptions{
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
EncryptFn: dstOpts.EncryptFn,
|
||||
WantChecksum: dstOpts.WantChecksum,
|
||||
WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
|
||||
}
|
||||
|
||||
return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
|
||||
|
||||
@@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) {
 		{1024},
 	}
 	for _, testCase := range cases {
-		testCase := testCase
 		key := randString(testCase.key)
 		b.Run("", func(b *testing.B) {
 			b.SetBytes(1024)
 			b.ReportAllocs()
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
 				crcHashMod(key, 16)
 			}
 		})
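`b.Loop()` (testing package, Go 1.24) replaces the manual `for i := 0; i < b.N; i++` benchmark loop and manages the timer itself, which makes the explicit `b.ResetTimer()` above redundant, though keeping it is harmless. A hedged, self-contained benchmark in the same shape:

```go
package bench_test

import (
	"hash/crc32"
	"testing"
)

// BenchmarkChecksum shows the b.Loop form: the method returns true until the
// framework has collected enough iterations, so no b.N bookkeeping is needed.
func BenchmarkChecksum(b *testing.B) {
	payload := make([]byte, 1024)
	b.SetBytes(1024)
	b.ReportAllocs()
	for b.Loop() {
		crc32.ChecksumIEEE(payload)
	}
}
```

Run with `go test -bench=.` as usual; the reported ns/op and MB/s are computed the same way as with the classic loop.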
@@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) {
|
||||
{1024},
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
testCase := testCase
|
||||
key := randString(testCase.key)
|
||||
b.Run("", func(b *testing.B) {
|
||||
b.SetBytes(1024)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
sipHashMod(key, 16, testUUID)
|
||||
}
|
||||
})
|
||||
@@ -164,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
|
||||
|
||||
nDisks := 16 // Maximum disks.
|
||||
var erasureDisks []string
|
||||
for i := 0; i < nDisks; i++ {
|
||||
for range nDisks {
|
||||
// Do not attempt to create this path, the test validates
|
||||
// so that newErasureSets initializes non existing paths
|
||||
// and successfully returns initialized object layer.
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
@@ -175,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
|
||||
|
||||
g := errgroup.WithNErrs(len(disks))
|
||||
for index := range disks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
di := madmin.Disk{
|
||||
Endpoint: endpoints[index].String(),
|
||||
@@ -219,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
|
||||
di.Metrics.LastMinute[k] = v.asTimedAction()
|
||||
}
|
||||
}
|
||||
for k, v := range info.Metrics.APICalls {
|
||||
di.Metrics.APICalls[k] = v
|
||||
}
|
||||
maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls)
|
||||
if info.Total > 0 {
|
||||
di.Utilization = float64(info.Used / info.Total * 100)
|
||||
}
|
||||
@@ -287,7 +285,6 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new
|
||||
infos := make([]DiskInfo, len(disks))
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for _, i := range r.Perm(len(disks)) {
|
||||
i := i
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
@@ -98,8 +98,8 @@ func fmtGenMain(ctxt *cli.Context) {
|
||||
setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
|
||||
format := newFormatErasureV3(setCount, setDriveCount)
|
||||
format.ID = deploymentID
|
||||
for i := 0; i < setCount; i++ { // for each erasure set
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for i := range setCount { // for each erasure set
|
||||
for j := range setDriveCount {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
if deploymentID != "" {
|
||||
|
||||
@@ -157,9 +157,9 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3
|
||||
format.Erasure.Sets = make([][]string, numSets)
|
||||
|
||||
for i := 0; i < numSets; i++ {
|
||||
for i := range numSets {
|
||||
format.Erasure.Sets[i] = make([]string, setLen)
|
||||
for j := 0; j < setLen; j++ {
|
||||
for j := range setLen {
|
||||
format.Erasure.Sets[i][j] = mustGetUUID()
|
||||
}
|
||||
}
|
||||
@@ -177,7 +177,7 @@ func formatGetBackendErasureVersion(b []byte) (string, error) {
|
||||
return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version)
|
||||
}
|
||||
if meta.Format != formatBackendErasure && meta.Format != formatBackendErasureSingle {
|
||||
return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle)
|
||||
return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle)
|
||||
}
|
||||
// Erasure backend found, proceed to detect version.
|
||||
format := &formatErasureVersionDetect{}
|
||||
@@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
|
||||
|
||||
// Load format from each disk in parallel
|
||||
for index := range storageDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if storageDisks[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -514,7 +513,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
|
||||
}
|
||||
|
||||
// Make sure that the diskID is found in the set.
|
||||
for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
|
||||
for i := range len(tmpFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
|
||||
if this == tmpFormat.Erasure.Sets[i][j] {
|
||||
return nil
|
||||
@@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format
|
||||
|
||||
// Write `format.json` to all disks.
|
||||
for index := range storageDisks {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
if formats[index] == nil {
|
||||
return errDiskNotFound
|
||||
@@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag
|
||||
storageDisks := make([]StorageAPI, len(endpoints))
|
||||
g := errgroup.WithNErrs(len(endpoints))
|
||||
for index := range endpoints {
|
||||
index := index
|
||||
g.Go(func() (err error) {
|
||||
storageDisks[index], err = newStorageAPI(endpoints[index], opts)
|
||||
return err
|
||||
@@ -600,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool {
|
||||
func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error {
|
||||
g := errgroup.WithNErrs(len(formats))
|
||||
for i := range formats {
|
||||
i := i
|
||||
g.Go(func() error {
|
||||
if formats[i] == nil || !endpoints[i].IsLocal {
|
||||
return nil
|
||||
@@ -639,9 +635,9 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
hostCount := make(map[string]int, setDriveCount)
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for j := range setDriveCount {
|
||||
disk := storageDisks[i*setDriveCount+j]
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
@@ -662,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
|
||||
return
|
||||
}
|
||||
logger.Info(" * Set %v:", i+1)
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for j := range setDriveCount {
|
||||
disk := storageDisks[i*setDriveCount+j]
|
||||
logger.Info(" - Drive: %s", disk.String())
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestFixFormatV3(t *testing.T) {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
|
||||
formats := make([]*formatErasureV3, 8)
|
||||
|
||||
for j := 0; j < 8; j++ {
|
||||
for j := range 8 {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[0][j]
|
||||
formats[j] = newFormat
|
||||
@@ -79,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
|
||||
formats := make([]*formatErasureV3, 16)
|
||||
|
||||
for j := 0; j < 16; j++ {
|
||||
for j := range 16 {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[0][j]
|
||||
formats[j] = newFormat
|
||||
@@ -276,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
|
||||
formats := make([]*formatErasureV3, 32)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for i := range setCount {
|
||||
for j := range setDriveCount {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
formats[i*setDriveCount+j] = newFormat
|
||||
@@ -390,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
|
||||
formats := make([]*formatErasureV3, 15*200)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for i := range setCount {
|
||||
for j := range setDriveCount {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
formats[i*setDriveCount+j] = newFormat
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, _ = getFormatErasureInQuorumOld(formats)
|
||||
}
|
||||
}
|
||||
@@ -414,18 +413,17 @@ func BenchmarkGetFormatErasureInQuorum(b *testing.B) {
|
||||
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
|
||||
formats := make([]*formatErasureV3, 15*200)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for i := range setCount {
|
||||
for j := range setDriveCount {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
formats[i*setDriveCount+j] = newFormat
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
_, _ = getFormatErasureInQuorum(formats)
|
||||
}
|
||||
}
|
||||
@@ -440,8 +438,8 @@ func TestNewFormatSets(t *testing.T) {
|
||||
formats := make([]*formatErasureV3, 32)
|
||||
errs := make([]error, 32)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
for i := range setCount {
|
||||
for j := range setDriveCount {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
formats[i*setDriveCount+j] = newFormat
|
||||
|
||||
@@ -98,7 +98,7 @@ func (m *minioFileInfo) IsDir() bool {
 	return m.isDir
 }

-func (m *minioFileInfo) Sys() interface{} {
+func (m *minioFileInfo) Sys() any {
 	return nil
 }

@@ -316,7 +316,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
 	if err != nil {
 		return nil, err
 	}
-	claims := make(map[string]interface{})
+	claims := make(map[string]any)
 	claims[expClaim] = UTCNow().Add(expiryDur).Unix()

 	claims[ldapUser] = lookupResult.NormDN
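The `interface{}` to `any` rewrites in the FTP and IAM code are purely cosmetic: `any` has been an alias for `interface{}` since Go 1.18, so signatures and call sites remain interchangeable. A quick sketch:

```go
package main

import "fmt"

// Both declarations are identical at the type level; `any` is only an alias.
func describeOld(v interface{}) string { return fmt.Sprintf("%T", v) }
func describeNew(v any) string         { return fmt.Sprintf("%T", v) }

func main() {
	claims := make(map[string]any) // same underlying type as map[string]interface{}
	claims["exp"] = 1700000000
	fmt.Println(describeOld(claims) == describeNew(claims)) // true
}
```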
@@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)()
|
||||
type minioLogger struct{}
|
||||
|
||||
// Print implement Logger
|
||||
func (log *minioLogger) Print(sessionID string, message interface{}) {
|
||||
func (log *minioLogger) Print(sessionID string, message any) {
|
||||
if serverDebugLog {
|
||||
fmt.Printf("%s %s\n", sessionID, message)
|
||||
}
|
||||
}
|
||||
|
||||
// Printf implement Logger
|
||||
func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) {
|
||||
func (log *minioLogger) Printf(sessionID string, format string, v ...any) {
|
||||
if serverDebugLog {
|
||||
if sessionID != "" {
|
||||
fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...))
|
||||
@@ -75,6 +75,7 @@ func startFTPServer(args []string) {
|
||||
portRange string
|
||||
tlsPrivateKey string
|
||||
tlsPublicCert string
|
||||
forceTLS bool
|
||||
)
|
||||
|
||||
var err error
|
||||
@@ -103,6 +104,11 @@ func startFTPServer(args []string) {
|
||||
tlsPrivateKey = tokens[1]
|
||||
case "tls-public-cert":
|
||||
tlsPublicCert = tokens[1]
|
||||
case "force-tls":
|
||||
forceTLS, err = strconv.ParseBool(tokens[1])
|
||||
if err != nil {
|
||||
logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s (%v)", arg, err), "unable to start FTP server")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,6 +135,10 @@ func startFTPServer(args []string) {
|
||||
|
||||
tls := tlsPrivateKey != "" && tlsPublicCert != ""
|
||||
|
||||
if forceTLS && !tls {
|
||||
logger.Fatal(fmt.Errorf("invalid TLS arguments provided. force-tls, but missing private key --ftp=\"tls-private-key=path/to/private.key\""), "unable to start FTP server")
|
||||
}
|
||||
|
||||
name := "MinIO FTP Server"
|
||||
if tls {
|
||||
name = "MinIO FTP(Secure) Server"
|
||||
@@ -147,6 +157,7 @@ func startFTPServer(args []string) {
|
||||
Logger: &minioLogger{},
|
||||
PassivePorts: portRange,
|
||||
PublicIP: publicIP,
|
||||
ForceTLS: forceTLS,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Fatal(err, "unable to initialize FTP server")
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"net/http"
|
||||
"path"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -396,18 +397,16 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler {
|
||||
if k == "delimiter" { // delimiters are allowed to have `.` or `..`
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
if hasBadPathComponent(v) {
|
||||
if ok {
|
||||
tc.FuncName = "handler.ValidRequest"
|
||||
tc.ResponseRecorder.LogErrBody = true
|
||||
}
|
||||
|
||||
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
|
||||
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
|
||||
atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
|
||||
return
|
||||
if slices.ContainsFunc(vv, hasBadPathComponent) {
|
||||
if ok {
|
||||
tc.FuncName = "handler.ValidRequest"
|
||||
tc.ResponseRecorder.LogErrBody = true
|
||||
}
|
||||
|
||||
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
|
||||
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
|
||||
atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasMultipleAuth(r) {
|
||||
|
||||
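The request-validation middleware above now uses `slices.ContainsFunc`, which runs a predicate over the slice and returns true on the first match, replacing the nested loop over each query value. A sketch with a simplified predicate (the real `hasBadPathComponent` in MinIO does more than this stand-in):

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// hasBadPathComponent is a simplified, illustrative check that rejects
// path components "." and "..".
func hasBadPathComponent(v string) bool {
	for _, part := range strings.Split(strings.ReplaceAll(v, "\\", "/"), "/") {
		if part == "." || part == ".." {
			return true
		}
	}
	return false
}

func main() {
	values := []string{"photos/2024/a.png", "ok/../escape"}
	// True if any value in the slice satisfies the predicate.
	fmt.Println(slices.ContainsFunc(values, hasBadPathComponent)) // true
}
```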
@@ -90,7 +90,7 @@ var isHTTPHeaderSizeTooLargeTests = []struct {
|
||||
|
||||
func generateHeader(size, usersize int) http.Header {
|
||||
header := http.Header{}
|
||||
for i := 0; i < size; i++ {
|
||||
for i := range size {
|
||||
header.Set(strconv.Itoa(i), "")
|
||||
}
|
||||
userlength := 0
|
||||
@@ -136,7 +136,6 @@ var containsReservedMetadataTests = []struct {
|
||||
|
||||
func TestContainsReservedMetadata(t *testing.T) {
|
||||
for _, test := range containsReservedMetadataTests {
|
||||
test := test
|
||||
t.Run("", func(t *testing.T) {
|
||||
contains := containsReservedMetadata(test.header)
|
||||
if contains && !test.shouldFail {
|
||||
@@ -201,7 +200,7 @@ func Benchmark_hasBadPathComponent(t *testing.B) {
|
||||
t.Run(tt.name, func(b *testing.B) {
|
||||
b.SetBytes(int64(len(tt.input)))
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
if got := hasBadPathComponent(tt.input); got != tt.want {
|
||||
t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want)
|
||||
}
|
||||
|
||||
10 cmd/grid.go
@@ -22,7 +22,7 @@ import (
|
||||
"crypto/tls"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/grid"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/rest"
|
||||
@@ -52,8 +52,8 @@ func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error {
|
||||
newCachedAuthToken(),
|
||||
&tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
CipherSuites: fips.TLSCiphers(),
|
||||
CurvePreferences: fips.TLSCurveIDs(),
|
||||
CipherSuites: crypto.TLSCiphers(),
|
||||
CurvePreferences: crypto.TLSCurveIDs(),
|
||||
}),
|
||||
Local: local,
|
||||
Hosts: hosts,
|
||||
@@ -85,8 +85,8 @@ func initGlobalLockGrid(ctx context.Context, eps EndpointServerPools) error {
|
||||
newCachedAuthToken(),
|
||||
&tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
CipherSuites: fips.TLSCiphers(),
|
||||
CurvePreferences: fips.TLSCurveIDs(),
|
||||
CipherSuites: crypto.TLSCiphers(),
|
||||
CurvePreferences: crypto.TLSCurveIDs(),
|
||||
}, grid.RouteLockPath),
|
||||
Local: local,
|
||||
Hosts: hosts,
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"net/textproto"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
@@ -291,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
 		return contentEnc
 	}
 	var newEncs []string
-	for _, enc := range strings.Split(contentEnc, ",") {
+	for enc := range strings.SplitSeq(contentEnc, ",") {
 		if enc != streamingContentEncoding {
 			newEncs = append(newEncs, enc)
 		}
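`strings.SplitSeq` (Go 1.24) returns an iterator over the split segments instead of allocating the whole slice up front, which is why the loop header changes from `for _, enc := range strings.Split(...)` to `for enc := range strings.SplitSeq(...)`. Sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	contentEnc := "aws-chunked,gzip,identity"

	// SplitSeq yields each segment lazily; no intermediate []string is built.
	var kept []string
	for enc := range strings.SplitSeq(contentEnc, ",") {
		if enc != "aws-chunked" {
			kept = append(kept, enc)
		}
	}
	fmt.Println(strings.Join(kept, ",")) // gzip,identity
}
```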
@@ -427,9 +428,31 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
|
||||
HTTPStatusCode: http.StatusUpgradeRequired,
|
||||
}, r.URL)
|
||||
default:
|
||||
defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
|
||||
defer atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
|
||||
|
||||
// When we are not running in S3 Express mode, generate appropriate error
|
||||
// for x-amz-write-offset HEADER specified.
|
||||
if _, ok := r.Header[xhttp.AmzWriteOffsetBytes]; ok {
|
||||
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
|
||||
if ok {
|
||||
tc.FuncName = "s3.AppendObject"
|
||||
tc.ResponseRecorder.LogErrBody = true
|
||||
}
|
||||
|
||||
writeErrorResponse(r.Context(), w, getAPIError(ErrNotImplemented), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt)
|
||||
if ok {
|
||||
tc.FuncName = "s3.ValidRequest"
|
||||
tc.ResponseRecorder.LogErrBody = true
|
||||
}
|
||||
|
||||
writeErrorResponse(r.Context(), w, APIError{
|
||||
Code: "BadRequest",
|
||||
Description: fmt.Sprintf("An error occurred when parsing the HTTP request %s at '%s'",
|
||||
Description: fmt.Sprintf("An unsupported API call for method: %s at '%s'",
|
||||
r.Method, r.URL.Path),
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
}, r.URL)
|
||||
|
||||
@@ -54,10 +54,7 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er

 	case h.IsSuffixLength:
 		specifiedLen := -h.Start
-		rangeLength = specifiedLen
-		if specifiedLen > resourceSize {
-			rangeLength = resourceSize
-		}
+		rangeLength = min(specifiedLen, resourceSize)

 	case h.Start >= resourceSize:
 		return 0, InvalidRange{
@@ -98,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64

 	start = h.Start
 	if h.IsSuffixLength {
-		start = resourceSize + h.Start
-		if start < 0 {
-			start = 0
-		}
+		start = max(resourceSize+h.Start, 0)
 	}
 	return start, length, nil
 }
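The range-spec arithmetic now uses the `min` and `max` builtins (Go 1.21) instead of explicit clamp branches; both forms are equivalent. A worked example with HTTP suffix ranges, written as a standalone sketch:

```go
package main

import "fmt"

// suffixLength clamps a "last N bytes" request to the resource size,
// mirroring rangeLength = min(specifiedLen, resourceSize).
func suffixLength(specifiedLen, resourceSize int64) int64 {
	return min(specifiedLen, resourceSize)
}

// suffixStart clamps the computed start offset at zero, mirroring
// start = max(resourceSize+start, 0) for a negative suffix start.
func suffixStart(start, resourceSize int64) int64 {
	return max(resourceSize+start, 0)
}

func main() {
	// Request "last 500 bytes" of a 200-byte object.
	fmt.Println(suffixLength(500, 200)) // 200 — cannot exceed the object
	fmt.Println(suffixStart(-500, 200)) // 0   — start clamped to the beginning

	// Request "last 100 bytes" of a 1000-byte object.
	fmt.Println(suffixLength(100, 1000)) // 100
	fmt.Println(suffixStart(-100, 1000)) // 900
}
```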
@@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType {
|
||||
return ies.usersSysType
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error {
|
||||
func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error {
|
||||
data, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it
|
||||
return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...)
|
||||
}
|
||||
|
||||
func getIAMConfig(item interface{}, data []byte, itemPath string) error {
|
||||
func getIAMConfig(item any, data []byte, itemPath string) error {
|
||||
data, err := decryptData(data, itemPath)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
|
||||
return json.Unmarshal(data, item)
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error {
|
||||
func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error {
|
||||
data, err := readKeyEtcd(ctx, ies.client, path)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -80,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType {
|
||||
return iamOS.usersSysType
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error {
|
||||
func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
data, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
@@ -135,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context,
|
||||
return data, meta, nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error {
|
||||
func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error {
|
||||
data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -294,7 +295,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA
|
||||
g := errgroup.WithNErrs(len(users))
|
||||
|
||||
for index := range users {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
userName := path.Dir(users[index])
|
||||
user, err := iamOS.loadUserIdentity(ctx, userName, userType)
|
||||
@@ -413,7 +413,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use
|
||||
g := errgroup.WithNErrs(len(users))
|
||||
|
||||
for index := range users {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
userName := strings.TrimSuffix(users[index], ".json")
|
||||
userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup)
|
||||
@@ -538,7 +537,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici
|
||||
g := errgroup.WithNErrs(len(policies))
|
||||
|
||||
for index := range policies {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
policyName := path.Dir(policies[index])
|
||||
policyDoc, err := iamOS.loadPolicy(ctx, policyName)
|
||||
@@ -776,9 +774,7 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
|
||||
}
|
||||
|
||||
// Copy svcUsersMap to cache.iamUsersMap
|
||||
for k, v := range svcUsersMap {
|
||||
cache.iamUsersMap[k] = v
|
||||
}
|
||||
maps.Copy(cache.iamUsersMap, svcUsersMap)
|
||||
|
||||
cache.buildUserGroupMemberships()
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -159,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string
|
||||
type UserIdentity struct {
|
||||
Version int `json:"version"`
|
||||
Credentials auth.Credentials `json:"credentials"`
|
||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
}
|
||||
|
||||
func newUserIdentity(cred auth.Credentials) UserIdentity {
|
||||
@@ -171,7 +172,7 @@ type GroupInfo struct {
|
||||
Version int `json:"version"`
|
||||
Status string `json:"status"`
|
||||
Members []string `json:"members"`
|
||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
}
|
||||
|
||||
func newGroupInfo(members []string) GroupInfo {
|
||||
@@ -182,7 +183,7 @@ func newGroupInfo(members []string) GroupInfo {
|
||||
type MappedPolicy struct {
|
||||
Version int `json:"version"`
|
||||
Policies string `json:"policy"`
|
||||
UpdatedAt time.Time `json:"updatedAt,omitempty"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
}
|
||||
|
||||
// mappedPoliciesToMap copies the map of mapped policies to a regular map.
|
||||
@@ -198,7 +199,7 @@ func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]Mapped
|
||||
// converts a mapped policy into a slice of distinct policies
|
||||
func (mp MappedPolicy) toSlice() []string {
|
||||
var policies []string
|
||||
for _, policy := range strings.Split(mp.Policies, ",") {
|
||||
for policy := range strings.SplitSeq(mp.Policies, ",") {
|
||||
if strings.TrimSpace(policy) == "" {
|
||||
continue
|
||||
}
|
||||
@@ -219,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy {
|
||||
type PolicyDoc struct {
|
||||
Version int `json:",omitempty"`
|
||||
Policy policy.Policy
|
||||
CreateDate time.Time `json:",omitempty"`
|
||||
UpdateDate time.Time `json:",omitempty"`
|
||||
CreateDate time.Time
|
||||
UpdateDate time.Time
|
||||
}
|
||||
|
||||
func newPolicyDoc(p policy.Policy) PolicyDoc {
|
||||
@@ -400,7 +401,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool,
|
||||
g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time.
|
||||
|
||||
for index := range groups {
|
||||
index := index
|
||||
g.Go(func() error {
|
||||
err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap)
|
||||
if err != nil && !errors.Is(err, errNoSuchPolicy) {
|
||||
@@ -610,8 +610,8 @@ type IAMStorageAPI interface {
|
||||
loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
|
||||
loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error
|
||||
loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
|
||||
saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
|
||||
loadIAMConfig(ctx context.Context, item interface{}, path string) error
|
||||
saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error
|
||||
loadIAMConfig(ctx context.Context, item any, path string) error
|
||||
deleteIAMConfig(ctx context.Context, path string) error
|
||||
savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
|
||||
saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
|
||||
@@ -839,7 +839,7 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string,
|
||||
return policies, nil
|
||||
}
|
||||
if store.policy != nil {
|
||||
val, err, _ := store.policy.Do(name, func() (interface{}, error) {
|
||||
val, err, _ := store.policy.Do(name, func() (any, error) {
|
||||
return getPolicies()
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1614,9 +1614,7 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic
|
||||
}
|
||||
|
||||
cache := store.lock()
|
||||
for policy, p := range m {
|
||||
cache.iamPolicyDocsMap[policy] = p
|
||||
}
|
||||
maps.Copy(cache.iamPolicyDocsMap, m)
|
||||
store.unlock()
|
||||
|
||||
for policy, p := range m {
|
||||
@@ -2909,7 +2907,7 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred
|
||||
func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error {
|
||||
groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn
|
||||
|
||||
newCachePopulate := func() (val interface{}, err error) {
|
||||
newCachePopulate := func() (val any, err error) {
|
||||
newCache := newIamCache()
|
||||
|
||||
// Check for service account first
|
||||
@@ -2975,7 +2973,7 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
|
||||
}
|
||||
|
||||
var (
|
||||
val interface{}
|
||||
val any
|
||||
err error
|
||||
)
|
||||
if store.group != nil {
|
||||
@@ -3007,30 +3005,20 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
 		return true
 	})
 
-	for k, v := range newCache.iamGroupsMap {
-		cache.iamGroupsMap[k] = v
-	}
+	maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap)
 
-	for k, v := range newCache.iamPolicyDocsMap {
-		cache.iamPolicyDocsMap[k] = v
-	}
+	maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap)
 
-	for k, v := range newCache.iamUserGroupMemberships {
-		cache.iamUserGroupMemberships[k] = v
-	}
+	maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships)
 
 	newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool {
 		cache.iamUserPolicyMap.Store(k, v)
 		return true
 	})
 
-	for k, v := range newCache.iamUsersMap {
-		cache.iamUsersMap[k] = v
-	}
+	maps.Copy(cache.iamUsersMap, newCache.iamUsersMap)
 
-	for k, v := range newCache.iamSTSAccountsMap {
-		cache.iamSTSAccountsMap[k] = v
-	}
+	maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap)
 
 	newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool {
 		cache.iamSTSPolicyMap.Store(k, v)
51 cmd/iam.go
@@ -24,6 +24,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"math/rand"
 	"path"
 	"sort"
@@ -276,7 +277,9 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	for {
 		if !openidInit {
 			openidConfig, err := openid.LookupConfig(s,
-				NewHTTPTransport(), xhttp.DrainBody, globalSite.Region())
+				xhttp.WithUserAgent(NewHTTPTransport(), func() string {
+					return getUserAgent(getMinioMode())
+				}), xhttp.DrainBody, globalSite.Region())
 			if err != nil {
 				iamLogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err), logger.WarningKind)
 			} else {
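The OpenID lookup now goes through xhttp.WithUserAgent, which wraps the transport so outgoing requests carry the MinIO user agent. As a rough illustration of the general pattern only (this is not MinIO's xhttp implementation; names here are hypothetical), a wrapping RoundTripper might look like:

package main

import (
	"fmt"
	"net/http"
)

// uaTransport is a hypothetical wrapper used only to illustrate the pattern:
// delegate to a base RoundTripper after stamping a User-Agent header on a
// clone of the request.
type uaTransport struct {
	base http.RoundTripper
	ua   func() string
}

func (t uaTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	r := req.Clone(req.Context())
	r.Header.Set("User-Agent", t.ua())
	return t.base.RoundTrip(r)
}

func main() {
	client := &http.Client{Transport: uaTransport{
		base: http.DefaultTransport,
		ua:   func() string { return "example-agent/0.1" },
	}}
	fmt.Println(client.Transport != nil)
}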
@@ -366,14 +369,11 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	sys.rolesMap = make(map[arn.ARN]string)
 
 	// From OpenID
-	if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
-		sys.validateAndAddRolePolicyMappings(ctx, riMap)
-	}
+	maps.Copy(sys.rolesMap, sys.OpenIDConfig.GetRoleInfo())
 
 	// From AuthN plugin if enabled.
 	if authn := newGlobalAuthNPluginFn(); authn != nil {
-		riMap := authn.GetRoleInfo()
-		sys.validateAndAddRolePolicyMappings(ctx, riMap)
+		maps.Copy(sys.rolesMap, authn.GetRoleInfo())
 	}
 
 	sys.printIAMRoles()
@@ -501,33 +501,6 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
 	}
 }
 
-func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
-	// Validate that policies associated with roles are defined. If
-	// authZ plugin is set, role policies are just claims sent to
-	// the plugin and they need not exist.
-	//
-	// If some mapped policies do not exist, we print some error
-	// messages but continue any way - they can be fixed in the
-	// running server by creating the policies after start up.
-	for arn, rolePolicies := range m {
-		specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
-		validPolicies, _ := sys.store.MergePolicies(rolePolicies)
-		knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
-		unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
-		if len(unknownPoliciesSet) > 0 {
-			authz := newGlobalAuthZPluginFn()
-			if authz == nil {
-				// Print a warning that some policies mapped to a role are not defined.
-				errMsg := fmt.Errorf(
-					"The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.",
-					unknownPoliciesSet.ToSlice(), arn.String())
-				authZLogIf(ctx, errMsg, logger.WarningKind)
-			}
-		}
-		sys.rolesMap[arn] = rolePolicies
-	}
-}
-
 // Prints IAM role ARNs.
 func (sys *IAMSys) printIAMRoles() {
 	if len(sys.rolesMap) == 0 {
@@ -1083,7 +1056,7 @@ type newServiceAccountOpts struct {
 	expiration                 *time.Time
 	allowSiteReplicatorAccount bool // allow creating internal service account for site-replication.
 
-	claims map[string]interface{}
+	claims map[string]any
 }
 
 // NewServiceAccount - create a new service account
@@ -1126,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
 	if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount {
 		return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
 	}
-	m := make(map[string]interface{})
+	m := make(map[string]any)
 	m[parentClaim] = parentUser
 
 	if len(policyBuf) > 0 {
@@ -1372,7 +1345,7 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) (
 }
 
 // GetClaimsForSvcAcc - gets the claims associated with the service account.
-func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) {
+func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) {
 	if !sys.Initialized() {
 		return nil, errServerNotInitialized
 	}
@@ -1723,10 +1696,8 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
 		return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...))
 	}
 
-	for k, v := range updatedKeysMap {
-		// Replace the map values with the updated ones
-		accessKeyMap[k] = v
-	}
+	// Replace the map values with the updated ones
+	maps.Copy(accessKeyMap, updatedKeysMap)
 
 	return skippedAccessKeys, nil
 }
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"errors"
+	"maps"
 	"net/http"
 	"time"
@@ -110,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b
 		return nil, nil, false, errAuthentication
 	}
 
-	for k, v := range eclaims {
-		claims.MapClaims[k] = v
-	}
+	maps.Copy(claims.MapClaims, eclaims)
 
 	// if root access is disabled, disable all its service accounts and temporary credentials.
 	if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() {
@@ -175,7 +175,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
 		fn := authenticateNode
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			fn(creds.AccessKey, creds.SecretKey)
 		}
 	})
@@ -183,7 +183,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
 		fn := newCachedAuthToken()
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			fn()
 		}
 	})
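Both benchmarks switch from the classic for i := 0; i < b.N; i++ counter to b.Loop(), added to the testing package in Go 1.24. A standalone sketch of the idiom, using a stand-in workload rather than MinIO's authenticateNode:

package bench_test

import "testing"

func work(n int) int {
	total := 0
	for i := 0; i < n; i++ {
		total += i
	}
	return total
}

// BenchmarkWork uses the Go 1.24 b.Loop idiom: the loop runs exactly as many
// iterations as the benchmark framework asks for, with timing handled for us.
func BenchmarkWork(b *testing.B) {
	for b.Loop() {
		work(1000)
	}
}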
@@ -280,7 +280,6 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque
 	// 2. Verify that we can indeed decrypt the (encrypted) key
 	decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
 		Name:           key.KeyID,
-		Version:        key.Version,
 		Ciphertext:     key.Ciphertext,
 		AssociatedData: kmsContext,
 	})
@@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) {
 	// get runtime stack buffer.
 	buf := debug.Stack()
 	// runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" .
-	for _, g := range strings.Split(string(buf), "\n\n") {
+	for g := range strings.SplitSeq(string(buf), "\n\n") {
 		// Again split on a new line, the first line of the second half contains the info about the go routine.
 		sl := strings.SplitN(g, "\n", 2)
 		if len(sl) != 2 {
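strings.SplitSeq, added in Go 1.24, yields the substrings as an iterator instead of allocating a []string, which is why the loop no longer discards an index. A standalone sketch with a made-up input string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	stack := "goroutine 1\nline\n\ngoroutine 2\nline"

	// SplitSeq produces the same pieces as strings.Split but lazily,
	// avoiding the intermediate slice allocation.
	for part := range strings.SplitSeq(stack, "\n\n") {
		fmt.Println(part)
		fmt.Println("---")
	}
}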
@@ -329,7 +329,7 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
 	lris, ok := l.lockMap[resource]
 	if !ok {
 		// Just to be safe, delete uuids.
-		for idx := 0; idx < maxDeleteList; idx++ {
+		for idx := range maxDeleteList {
 			mapID := formatUUID(uid, idx)
 			if _, ok := l.lockUID[mapID]; !ok {
 				break
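Since Go 1.22, range over an integer n yields 0 through n-1, which is what replaces the counted loop here. A standalone sketch:

package main

import "fmt"

func main() {
	const maxDeleteList = 3

	// range over an integer iterates 0, 1, ..., maxDeleteList-1,
	// equivalent to the counted loop this hunk rewrites.
	for idx := range maxDeleteList {
		fmt.Println(idx)
	}
}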
Some files were not shown because too many files have changed in this diff.