Compare commits


1 Commit

Author | SHA1 | Message | Date
Philip Laine | 548b6b1bdb | Replace http util reverse proxy with custom request forwarding | 2024-04-14 12:45:59 +02:00
122 changed files with 11761 additions and 8461 deletions

View File

@ -5,7 +5,7 @@ body:
- type: markdown
attributes:
value: |
Thank you for taking the time to fill out this bug report! Please read the [FAQ](https://spegel.dev/docs/faq/) and check existing issues before submitting a new issue.
Thank you for taking the time to fill out this bug report! Please read the [FAQ](../../docs/FAQ.md) and check existing issues before submitting.
- type: input
attributes:
label: Spegel version

View File

@ -10,7 +10,3 @@ updates:
schedule:
interval: "daily"
open-pull-requests-limit: 15
groups:
k8s:
patterns:
- "k8s.io/*"

View File

@ -1,30 +1,29 @@
name: artifacthub
on:
push:
branches: ["main"]
paths:
- "charts/spegel/artifacthub-repo.yml"
paths: ["charts/spegel/artifacthub-repo.yml"]
permissions:
contents: read
packages: write
defaults:
run:
shell: bash
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Clone repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
uses: actions/checkout@v4
with:
submodules: true
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 #v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Setup ORAS
uses: oras-project/setup-oras@8d34698a59f5ffe24821f0b48ab62a3de8b64b20 #v1.2.3
uses: oras-project/setup-oras@v1
- name: Push Artifact Hub metadata
run: oras push ghcr.io/spegel-org/helm-charts/spegel:artifacthub.io --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml charts/spegel/artifacthub-repo.yml:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml

View File

@ -1,38 +0,0 @@
name: e2e
on:
pull_request:
permissions:
contents: read
defaults:
run:
shell: bash
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- proxy-mode: iptables
ip-family: ipv4
- proxy-mode: iptables
ip-family: ipv6
- proxy-mode: ipvs
ip-family: ipv4
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 #v5.5.0
with:
go-version-file: go.mod
- name: Setup GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
install-only: true
- name: Setup Kind
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 #v1.12.0
with:
version: v0.29.0
install_only: true
- name: Run e2e
run: make test-e2e E2E_PROXY_MODE=${{ matrix.proxy-mode }} E2E_IP_FAMILY=${{ matrix.ip-family }}

View File

@ -1,35 +0,0 @@
name: go
on:
pull_request:
permissions:
contents: read
defaults:
run:
shell: bash
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 #v5.5.0
with:
go-version-file: go.mod
- name: Setup golangci-lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 #v8.0.0
unit:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 #v5.5.0
with:
go-version-file: go.mod
- name: Run tests
run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 #v5.4.3
with:
token: ${{ secrets.CODECOV_TOKEN }}

View File

@ -1,27 +0,0 @@
name: helm
on:
pull_request:
permissions:
contents: read
defaults:
run:
shell: bash
jobs:
docs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 #v5.5.0
with:
go-version-file: go.mod
- name: Run helm-docs
run: make helm-docs
- name: Check if working tree is dirty
run: |
if [[ $(git diff --stat) != '' ]]; then
git diff
echo 'run make helm-docs and commit changes'
exit 1
fi

View File

@ -1,61 +1,55 @@
name: release
on:
push:
tags:
- 'v*'
release:
types: [published]
permissions:
contents: write
contents: read
packages: write
id-token: write
defaults:
run:
shell: bash
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Clone repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
uses: actions/checkout@v4
- name: Setup Cosign
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb #v3.8.2
uses: sigstore/cosign-installer@v3.5.0
- name: Setup Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 #v4.3.0
uses: azure/setup-helm@v4
with:
version: v3.17.3
version: v3.12.1
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 #v3.10.0
uses: docker/setup-buildx-action@v3
- name: Setup yq
uses: frenck/action-setup-yq@c4b5be8b4a215c536a41d436757d9feb92836d4f #v1.0.2
uses: frenck/action-setup-yq@v1
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 #v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Prepare version
- name: Prepare
id: prep
run: |
VERSION=sha-${GITHUB_SHA::8}
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF/refs\/tags\//}
fi
echo "Refer to the [Changelog](https://github.com/spegel-org/spegel/blob/main/CHANGELOG.md#${VERSION//.}) for list of changes." > ${{ runner.temp }}/NOTES.txt
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
args: release --clean --release-notes ${{ runner.temp }}/NOTES.txt
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Generate images meta
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 #v5.7.0
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository_owner }}/spegel
images: ghcr.io/spegel-org/spegel
tags: type=raw,value=${{ steps.prep.outputs.VERSION }}
- name: Publish multi-arch image
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 #v6.18.0
uses: docker/build-push-action@v5
id: build
with:
push: true
@ -63,22 +57,20 @@ jobs:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
tags: ghcr.io/${{ github.repository_owner }}/spegel:${{ steps.prep.outputs.VERSION }}
tags: ghcr.io/spegel-org/spegel:${{ steps.prep.outputs.VERSION }}
labels: ${{ steps.meta.outputs.labels }}
- name: Sign the image with Cosign
run: |
cosign sign --yes ghcr.io/${{ github.repository_owner }}/spegel@${{ steps.build.outputs.DIGEST }}
cosign sign --yes ghcr.io/spegel-org/spegel@${{ steps.build.outputs.DIGEST }}
- name: Publish Helm chart to GHCR
id: helm
run: |
HELM_VERSION=${{ steps.prep.outputs.VERSION }}
HELM_VERSION=${HELM_VERSION#v}
rm charts/spegel/artifacthub-repo.yml
yq -i '.image.digest = "${{ steps.build.outputs.DIGEST }}"' charts/spegel/values.yaml
helm package --app-version ${{ steps.prep.outputs.VERSION }} --version ${HELM_VERSION} charts/spegel
helm push spegel-${HELM_VERSION}.tgz oci://ghcr.io/${{ github.repository_owner }}/helm-charts 2> .digest
helm package --app-version ${{ steps.prep.outputs.VERSION }} --version ${{ steps.prep.outputs.VERSION }} charts/spegel
helm push spegel-${{ steps.prep.outputs.VERSION }}.tgz oci://ghcr.io/spegel-org/helm-charts 2> .digest
DIGEST=$(cat .digest | awk -F "[, ]+" '/Digest/{print $NF}')
echo "DIGEST=${DIGEST}" >> $GITHUB_OUTPUT
- name: Sign the Helm chart with Cosign
run: |
cosign sign --yes ghcr.io/${{ github.repository_owner }}/helm-charts/spegel@${{ steps.helm.outputs.DIGEST }}
cosign sign --yes ghcr.io/spegel-org/helm-charts/spegel@${{ steps.helm.outputs.DIGEST }}

75  .github/workflows/tests.yaml vendored Normal file
View File

@ -0,0 +1,75 @@
name: tests
on:
pull_request:
push:
branches:
- main
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
check-latest: true
cache: true
- name: Setup golangci-lint
uses: golangci/golangci-lint-action@v4
with:
version: v1.55.2
args: --timeout 3m0s
unit:
needs: lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
check-latest: true
cache: true
- name: Run tests
run: make test
e2e:
needs: lint
runs-on: ubuntu-latest
strategy:
matrix:
cni: [iptables, iptables-ipv6, ipvs]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
check-latest: true
cache: true
- name: Run e2e
run: make e2e CNI=${{ matrix.cni }}
helm-docs:
needs: lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: 1.21.x
check-latest: true
cache: true
- name: Run helm-docs
run: make helm-docs
- name: Check if working tree is dirty
run: |
if [[ $(git diff --stat) != '' ]]; then
git diff
echo 'run make helm-docs and commit changes'
exit 1
fi

3  .gitignore vendored
View File

@ -19,6 +19,3 @@
# Go workspace file
go.work
# Added by goreleaser init:
dist/

View File

@ -1,105 +1,15 @@
version: "2"
linters:
default: none
disable-all: true
enable:
- errcheck
- gocritic
- gosimple
- govet
- importas
- ineffassign
- ireturn
- misspell
- nolintlint
- paralleltest
- perfsprint
- staticcheck
- testifylint
- unused
- noctx
settings:
errcheck:
disable-default-exclusions: true
check-type-assertions: true
check-blank: true
gocritic:
enable-all: true
disabled-checks:
- importShadow
- hugeParam
- rangeValCopy
- whyNoLint
- unnamedResult
- httpNoBody
govet:
disable:
- shadow
enable-all: true
importas:
alias:
- pkg: io/fs
alias: iofs
- pkg: github.com/go-logr/logr/testing
alias: tlog
- pkg: github.com/pelletier/go-toml/v2/unstable
alias: tomlu
- pkg: github.com/multiformats/go-multiaddr/net
alias: manet
- pkg: github.com/multiformats/go-multiaddr
alias: ma
- pkg: github.com/multiformats/go-multicodec
alias: mc
- pkg: github.com/multiformats/go-multihash
alias: mh
- pkg: github.com/ipfs/go-cid
alias: cid
- pkg: github.com/libp2p/go-libp2p-kad-dht
alias: dht
- pkg: github.com/libp2p/go-libp2p/p2p/net/mock
alias: mocknet
- pkg: go.etcd.io/bbolt
alias: bolt
- pkg: k8s.io/cri-api/pkg/apis/runtime/v1
alias: runtimeapi
- pkg: github.com/containerd/containerd/api/events
alias: eventtypes
- pkg: github.com/opencontainers/go-digest
alias: digest
- pkg: github.com/opencontainers/image-spec/specs-go/v1
alias: ocispec
- pkg: k8s.io/apimachinery/pkg/util/version
alias: utilversion
no-extra-aliases: true
nolintlint:
require-explanation: true
require-specific: true
perfsprint:
strconcat: false
testifylint:
enable-all: true
ireturn:
allow:
- anon
- error
- empty
- stdlib
- github.com/libp2p/go-libp2p/core/crypto.PrivKey
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- misspell
- testifylint
linters-settings:
govet:
enable:
- fieldalignment

View File

@ -1,26 +0,0 @@
version: 2
project_name: spegel
before:
hooks:
- go mod tidy
builds:
- goos:
- linux
goarch:
- amd64
- arm
- arm64
goarm:
- 7
env:
- CGO_ENABLED=0
flags:
- -trimpath
- -a
no_unique_dist_dir: true
binary: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}/{{ .ProjectName }}"
archives:
- formats: [tar.gz]
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- none*

9  ADOPTERS.md Normal file
View File

@ -0,0 +1,9 @@
# Adopters
This list shows adopters of Spegel. If you are using Spegel in your organization, please consider adding yourself to this list, as it lends credibility to the project.
| Organization | Website |
| --- |--- |
| Xenit AB | https://xenit.se/ |
| National Research Platform | https://nationalresearchplatform.org |
| K3S | https://k3s.io/ |

View File

@ -7,296 +7,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
### Added
- [#905](https://github.com/spegel-org/spegel/pull/905) Change mirror type to url and add byte range parameter.
- [#909](https://github.com/spegel-org/spegel/pull/909) Add base http client and transport.
- [#910](https://github.com/spegel-org/spegel/pull/910) Add drain and close function.
### Changed
- [#906](https://github.com/spegel-org/spegel/pull/906) Replace HTTP header strings with httpx constants.
- [#916](https://github.com/spegel-org/spegel/pull/916) Refactor OCI client options and add header configuration.
### Deprecated
### Removed
### Fixed
- [#911](https://github.com/spegel-org/spegel/pull/911) Enforce use of request contexts and fix response closing.
- [#914](https://github.com/spegel-org/spegel/pull/914) Fix OCI client header parsing and improve tests.
### Security
## v0.3.0
### Added
- [#877](https://github.com/spegel-org/spegel/pull/877) Add support for www authenticate header.
- [#878](https://github.com/spegel-org/spegel/pull/878) Add dial timeout configuration in Containerd mirror configuration.
- [#889](https://github.com/spegel-org/spegel/pull/889) Add support for content create events.
### Changed
- [#881](https://github.com/spegel-org/spegel/pull/881) Add Variable for job name in Grafana Dashboard.
- [#852](https://github.com/spegel-org/spegel/pull/852) Remove use of Afero in Containerd config.
- [#854](https://github.com/spegel-org/spegel/pull/854) Implement unit tests for cleanup logic.
- [#860](https://github.com/spegel-org/spegel/pull/860) Update Go to 1.24.2.
- [#864](https://github.com/spegel-org/spegel/pull/864) Rename OCI client to store.
- [#871](https://github.com/spegel-org/spegel/pull/871) Implement OCI client and refactor debug web pulling.
- [#873](https://github.com/spegel-org/spegel/pull/873) Refactor web to use internal mux router.
- [#875](https://github.com/spegel-org/spegel/pull/875) Change debug unit formatting and add totals.
- [#880](https://github.com/spegel-org/spegel/pull/880) Refactor store advertisement to list content.
- [#888](https://github.com/spegel-org/spegel/pull/888) Refactor OCI events to support content events.
- [#890](https://github.com/spegel-org/spegel/pull/890) Refactor Containerd options to use config struct.
- [#896](https://github.com/spegel-org/spegel/pull/896) Rename package mux to httpx and refactor http helpers.
- [#897](https://github.com/spegel-org/spegel/pull/897) Add descriptor to header conversion.
### Fixed
- [#869](https://github.com/spegel-org/spegel/pull/869) Fix request logging for redirects and not found pages.
- [#872](https://github.com/spegel-org/spegel/pull/872) Allow returning libp2p crypto priv key in linter.
- [#894](https://github.com/spegel-org/spegel/pull/894) Update Kind to v0.29.0 and Fix Containerd v2 support.
- [#899](https://github.com/spegel-org/spegel/pull/899) Handle situation where digest is missing in registry response header.
- [#902](https://github.com/spegel-org/spegel/pull/902) Disable data dir when running Spegel in Kubernetes.
## v0.2.0
### Added
- [#832](https://github.com/spegel-org/spegel/pull/832) Add delete hook to cleanup configuration from host when chart is uninstalled.
- [#846](https://github.com/spegel-org/spegel/pull/846) Build binaries as part of the release process.
- [#848](https://github.com/spegel-org/spegel/pull/848) Add support for a static bootstrapper.
- [#850](https://github.com/spegel-org/spegel/pull/850) Persist libp2p key to disk when data directory is set.
### Changed
- [#812](https://github.com/spegel-org/spegel/pull/812) Upgrade to Go 1.24.1 and switch to use go tool for helm docs.
- [#725](https://github.com/spegel-org/spegel/pull/725) Remove use of httputil reverse proxy.
- [#820](https://github.com/spegel-org/spegel/pull/820) Switch to using new test context.
- [#827](https://github.com/spegel-org/spegel/pull/827) Add p2p options to router for optional configuration.
- [#835](https://github.com/spegel-org/spegel/pull/835) Refactor registry config to align with router config.
- [#847](https://github.com/spegel-org/spegel/pull/847) Set default values for address arguments.
### Removed
- [#831](https://github.com/spegel-org/spegel/pull/831) Remove local address check when resolving peers.
### Fixed
- [#824](https://github.com/spegel-org/spegel/pull/824) Fix improper image string formatting and expand tests.
- [#825](https://github.com/spegel-org/spegel/pull/825) Fix gopls modernize warnings.
- [#826](https://github.com/spegel-org/spegel/pull/826) Standardize router channel naming.
- [#844](https://github.com/spegel-org/spegel/pull/844) Fix p2p option naming to conform with the standard.
- [#849](https://github.com/spegel-org/spegel/pull/849) Fix libp2p options so field is exported in configuration.
## v0.1.1
### Fixed
- [#807](https://github.com/spegel-org/spegel/pull/807) Update golangci lint and fix new issues.
- [#810](https://github.com/spegel-org/spegel/pull/810) Increase timeout to avoid flakiness in conformance tests.
- [#806](https://github.com/spegel-org/spegel/pull/806) Fix verification of Containerd configuration with suffixes.
## v0.1.0
### Added
- [#717](https://github.com/spegel-org/spegel/pull/717) Extend tests for distribution.
- [#753](https://github.com/spegel-org/spegel/pull/753) Set GOMAXPROCS and GOMEMLIMIT when limits are set.
- [#792](https://github.com/spegel-org/spegel/pull/792) Add dev deploy recipe to simplify local development.
- [#791](https://github.com/spegel-org/spegel/pull/791) Add debug view to help validating Spegel.
### Changed
- [#747](https://github.com/spegel-org/spegel/pull/747) Update Go to 1.23.6.
- [#750](https://github.com/spegel-org/spegel/pull/750) Rename append mirrors to prepend existing.
- [#373](https://github.com/spegel-org/spegel/pull/373) Apply mirror configuration on all registries by default.
- [#762](https://github.com/spegel-org/spegel/pull/762) Set appropriate buckets for response size
- [#778](https://github.com/spegel-org/spegel/pull/778) Replace interface{} with any alias.
- [#784](https://github.com/spegel-org/spegel/pull/784) Refactor distribution and move to OCI package.
- [#787](https://github.com/spegel-org/spegel/pull/787) Refactor OCI image to allow parsing without digest.
- [#794](https://github.com/spegel-org/spegel/pull/794) Set default memory request and limit in Helm chart.
### Removed
- [#796](https://github.com/spegel-org/spegel/pull/796) Remove name from OCI image struct.
- [#799](https://github.com/spegel-org/spegel/pull/799) Remove Kubernetes bootstrapper.
### Fixed
- [#743](https://github.com/spegel-org/spegel/pull/743) Remove metrics label from bootstrap service in Helm chart.
- [#748](https://github.com/spegel-org/spegel/pull/748) Fix topology annotation.
- [#785](https://github.com/spegel-org/spegel/pull/785) Fix verification of digests when parsing distribution path.
- [#798](https://github.com/spegel-org/spegel/pull/798) Restart Spegel if Containerd event subscription is disconnected.
- [#800](https://github.com/spegel-org/spegel/pull/800) Fix so that host is closed even when a bootstrap error occurs.
- [#801](https://github.com/spegel-org/spegel/pull/801) Fix helm values naming for additionalMirrorTargets and mirroredRegistries.
## v0.0.30
### Changed
- [#694](https://github.com/spegel-org/spegel/pull/694) Replace IP in multi address with manet.
- [#693](https://github.com/spegel-org/spegel/pull/693) Add commonLabels for pods.
- [#699](https://github.com/spegel-org/spegel/pull/699) Remove as mismatch error and replace with errors as.
- [#701](https://github.com/spegel-org/spegel/pull/701) Rewrite e2e tests in Go.
- [#704](https://github.com/spegel-org/spegel/pull/704) Update Containerd client to v2.
### Fixed
- [#689](https://github.com/spegel-org/spegel/pull/689) Make cluster domain configurable.
- [#696](https://github.com/spegel-org/spegel/pull/696) Fix DNS bootstrap self check.
- [#702](https://github.com/spegel-org/spegel/pull/702) Refactor and add tests for p2p ready.
- [#703](https://github.com/spegel-org/spegel/pull/703) Fix p2p router close panic and add tests.
## v0.0.29
### Added
- [#678](https://github.com/spegel-org/spegel/pull/678) Add support for setting common labels in Helm chart.
- [#681](https://github.com/spegel-org/spegel/pull/681) Add import as linter.
### Changed
- [#683](https://github.com/spegel-org/spegel/pull/683) Change bootstrapper to allow returning multiple peers.
- [#684](https://github.com/spegel-org/spegel/pull/684) Allow bootstrappers to return multiaddress only containing IP.
- [#680](https://github.com/spegel-org/spegel/pull/680) Switch to using headless service for bootstrapping.
## v0.0.28
### Added
- [#576](https://github.com/spegel-org/spegel/pull/576) Add support for range requests for blobs.
- [#621](https://github.com/spegel-org/spegel/pull/621) Added Mermaid diagrams documentation to help explain Spegel's inner workings.
- [#629](https://github.com/spegel-org/spegel/pull/629) Document how to use multiple Spegel deployments in the same cluster.
- [#661](https://github.com/spegel-org/spegel/pull/661) Add allocs to pprof endpoints.
### Changed
- [#608](https://github.com/spegel-org/spegel/pull/608) Use custom proxy transport and increase idle connections per host.
### Fixed
- [#651](https://github.com/spegel-org/spegel/pull/651) Fix Containerd CRI config verification.
- [#660](https://github.com/spegel-org/spegel/pull/660) Add accept ranges header to blob HEAD request.
## v0.0.27
### Fixed
- [#603](https://github.com/spegel-org/spegel/pull/603) Fix append to backup always happening.
- [#604](https://github.com/spegel-org/spegel/pull/604) Create empty backup directory when mirror directory is empty.
## v0.0.26
### Removed
- [#596](https://github.com/spegel-org/spegel/pull/596) Remove throttling from blobs.
### Fixed
- [#601](https://github.com/spegel-org/spegel/pull/601) Fix Containerd host mirror ordering.
## v0.0.25
### Added
- [#578](https://github.com/spegel-org/spegel/pull/578) Add possibility to override environment variable NODE_IP.
### Changed
- [#575](https://github.com/spegel-org/spegel/pull/575) Update to Go v1.23.2.
### Fixed
- [#581](https://github.com/spegel-org/spegel/pull/581) Skip status response verification for containerd v2
## v0.0.24
### Added
- [#538](https://github.com/spegel-org/spegel/pull/538) Replace mock OCI client with in memory client.
- [#552](https://github.com/spegel-org/spegel/pull/552) Add support for VerticalPodAutoscaler in the Helm chart.
- [#556](https://github.com/spegel-org/spegel/pull/556) Add configuration for revisionHistoryLimit in the Helm Chart.
- [#573](https://github.com/spegel-org/spegel/pull/573) Use buffer pool for proxy copying data.
### Changed
- [#518](https://github.com/spegel-org/spegel/pull/518) Extend tests for image.
- [#519](https://github.com/spegel-org/spegel/pull/519) Extend tests for containerd.
- [#520](https://github.com/spegel-org/spegel/pull/520) Add tests for metrics.
- [#536](https://github.com/spegel-org/spegel/pull/536) Update Go version to 1.22.5.
- [#547](https://github.com/spegel-org/spegel/pull/547) Set blob content type to disable detection.
- [#553](https://github.com/spegel-org/spegel/pull/553) Re-use resources value for initContainer in the Helm Chart.
### Deprecated
### Removed
- [#517](https://github.com/spegel-org/spegel/pull/517) Remove deprecated CopyLayer function.
### Fixed
- [#535](https://github.com/spegel-org/spegel/pull/535) Fix Docker build casing checks.
### Security
## v0.0.23
### Added
- [#388](https://github.com/spegel-org/spegel/pull/388) Add support for deploying the Grafana dashboard with the Helm chart.
### Changed
- [#475](https://github.com/spegel-org/spegel/pull/475) Move resolving ref to digest to manifest handler.
- [#477](https://github.com/spegel-org/spegel/pull/477) Refactor distribution ref to simplify registry routing.
- [#479](https://github.com/spegel-org/spegel/pull/479) Enable goimports linter and fix errors.
- [#480](https://github.com/spegel-org/spegel/pull/480) Enable ireturn linter and fix errors.
- [#481](https://github.com/spegel-org/spegel/pull/481) Enable perfsprint linter and fix errors.
- [#482](https://github.com/spegel-org/spegel/pull/482) Enable gocritic linter and fix errors.
- [#483](https://github.com/spegel-org/spegel/pull/483) Update errcheck linter configuration and fix errors.
- [#487](https://github.com/spegel-org/spegel/pull/487) Move mirror metrics code to mirror handler.
- [#488](https://github.com/spegel-org/spegel/pull/488) Update existing registry errors and add more detail.
- [#495](https://github.com/spegel-org/spegel/pull/495) Modify e2e tests to allow reusing the same kind cluster.
- [#498](https://github.com/spegel-org/spegel/pull/498) Update to Go 1.22.
- [#499](https://github.com/spegel-org/spegel/pull/499) Add paralleltest linter and set all unit tests to run in parallel.
- [#501](https://github.com/spegel-org/spegel/pull/501) Rename mock router to memory router and add tests.
- [#507](https://github.com/spegel-org/spegel/pull/507) Change default resolve timeout to 20ms.
### Fixed
- [#460](https://github.com/spegel-org/spegel/pull/460) Fix environment variable for http-bootstrap-addr flag.
- [#471](https://github.com/spegel-org/spegel/pull/471) Fix handler key in request logging.
- [#490](https://github.com/spegel-org/spegel/pull/490) Close immediate channel after writing to it to close wait group in merge logic.
- [#491](https://github.com/spegel-org/spegel/pull/491) Fix so that resolve timeout does not cancel mirroring attempts.
- [#496](https://github.com/spegel-org/spegel/pull/496) Fix p2p bootstrap to run on failed readiness check.
## v0.0.22
### Added
### Added
- [#435](https://github.com/spegel-org/spegel/pull/435) Add pprof endpoints to enable profiling.
- [#434](https://github.com/spegel-org/spegel/pull/434) Add optional Containerd local content store to increase serve performance.
- [#438](https://github.com/spegel-org/spegel/pull/438) Set host path type for Containerd socket.
- [#449](https://github.com/spegel-org/spegel/pull/449) Replace zapr with slog and add log level configuration.
### Changed
- [#439](https://github.com/spegel-org/spegel/pull/439) Update Go version and fix toolchain version.
- [#436](https://github.com/spegel-org/spegel/pull/436) Replace http util reverse proxy with custom request forwarding.
### Deprecated
### Removed
### Fixed
- [#452](https://github.com/spegel-org/spegel/pull/452) Fix Containerd Subscribe returning on any error.
### Security
- [#451](https://github.com/spegel-org/spegel/pull/451) Bump golang.org/x/net from 0.21.0 to 0.23.0.
## v0.0.21
### Added
### Added
- [#421](https://github.com/spegel-org/spegel/pull/421) Add conformance tests to e2e test.
- [#424](https://github.com/spegel-org/spegel/pull/424) Add option to append mirror configuration instead of overwriting.
@ -312,8 +42,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- [#431](https://github.com/spegel-org/spegel/pull/431) Fix import error caused by invalid file name.
## v0.0.20
### Added
### Added
- [#416](https://github.com/spegel-org/spegel/pull/416) Add image and Helm chart signing with Cosign.
@ -332,11 +62,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.19
> [!IMPORTANT]
> [!IMPORTANT]
> The Spegel repository has been moved from XenitAB to a new GitHub organization.
> Make sure to update the organization in the image and chart references.
### Added
### Added
- [#335](https://github.com/spegel-org/spegel/pull/335) Add k3s to compatibility guide.
- [#359](https://github.com/spegel-org/spegel/pull/359) Extend OCI client tests.
@ -374,7 +104,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.18
### Added
### Added
- [#331](https://github.com/spegel-org/spegel/pull/331) Document possible modifications required for k8s-digester.
- [#337](https://github.com/spegel-org/spegel/pull/337) Add HTTP bootstrapper.
@ -401,7 +131,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.17
### Added
### Added
- [#299](https://github.com/spegel-org/spegel/pull/299) Add update strategy configuration to Helm chart.
@ -424,7 +154,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.15
### Added
### Added
- [#270](https://github.com/spegel-org/spegel/pull/270) Add tests for local and external service port.
- [#262](https://github.com/spegel-org/spegel/pull/262) Enable misspell linter and fix spelling mistakes.
@ -441,7 +171,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.14
### Added
### Added
- [#237](https://github.com/spegel-org/spegel/pull/237) Verify discard unpacked layers setting.
@ -456,7 +186,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.13
### Added
### Added
- [#195](https://github.com/spegel-org/spegel/pull/195) Fix daemonset argument namespace to use helper-defined namespace value.
@ -477,7 +207,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.12
### Added
### Added
- [#182](https://github.com/spegel-org/spegel/pull/182) Add lscr.io as default registry.
@ -492,7 +222,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.11
### Added
### Added
- [#170](https://github.com/spegel-org/spegel/pull/170) Backup existing Containerd mirror configuration.
- [#171](https://github.com/spegel-org/spegel/pull/171) Add option to disable resolve.
@ -503,7 +233,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.10
### Added
### Added
- [#145](https://github.com/spegel-org/spegel/pull/145) Add new field to override Helm chart namespace.
- [#153](https://github.com/spegel-org/spegel/pull/153) Add option to disable resolving latest tags.
@ -537,7 +267,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.8
### Added
### Added
- [#125](https://github.com/spegel-org/spegel/pull/125) Add retry mirroring to new peer if current peer fails.
- [#127](https://github.com/spegel-org/spegel/pull/127) Add configuration for resolve retry and timeout.
@ -569,21 +299,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- [#42](https://github.com/spegel-org/spegel/pull/42) Only use bootstrap function for initial peer discovery.
- [#66](https://github.com/spegel-org/spegel/pull/66) Move mirror configuration logic to run as an init container.
### Fixed
- [#71](https://github.com/spegel-org/spegel/pull/71) Fix priority class name.
## v0.0.5
### Added
### Added
- [#29](https://github.com/spegel-org/spegel/pull/29) Make priority class name configurable and set a default value.
- [#49](https://github.com/spegel-org/spegel/pull/49) Add registry.k8s.io to registry mirror list.
- [#56](https://github.com/spegel-org/spegel/pull/56) Add gcr.io and k8s.gcr.io registries to default list.
### Changed
- [#32](https://github.com/spegel-org/spegel/pull/32) Update Go to 1.20.
- [#33](https://github.com/spegel-org/spegel/pull/33) Remove containerd info call when handling manifest request.
- [#48](https://github.com/spegel-org/spegel/pull/48) Replace multierr with stdlib errors join.
@ -604,7 +334,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## v0.0.3
### Added
### Added
- [#18](https://github.com/spegel-org/spegel/pull/18) Add support to use Spegel instance on another node.

View File

@ -7,12 +7,8 @@ Thank you for considering contributing to Spegel, hopefully this document will m
The following tools are required to run the tests properly.
* go
* [golangci-lint](https://github.com/golangci/golangci-lint)
* [kind](https://github.com/kubernetes-sigs/kind)
* [goreleaser](https://github.com/goreleaser/goreleaser)
* [docker](https://docs.docker.com/get-started/get-docker/)
* [helm](https://github.com/helm/helm)
* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
* golangci-lint
* kind
Run the linter and the unit tests to quickly validate changes.
@ -23,13 +19,13 @@ make lint test
Run the e2e tests which take a bit more time.
```shell
make test-e2e
make e2e
```
There are e2e tests for the different CNIs: iptables, iptables-v6, and ipvs.
```shell
make test-e2e E2E_CNI=ipvs
make e2e CNI=ipvs
```
## Building
@ -37,32 +33,16 @@ make test-e2e E2E_CNI=ipvs
Build the Docker image locally.
```shell
make build-image
make docker-build
```
It is possible to specify a different image name and tag.
```shell
make build-image IMG=example.com/spegel TAG=feature
make docker-build IMG=example.com/spegel TAG=feature
```
### Local debugging
Run the `dev-deploy` recipe which will create a Kind cluster with the proper configuration and deploy Spegel into it. If you run this command a second time the cluster will be kept but Spegel will be updated.
```shell
make dev-deploy
```
After the command has run you can get a kubeconfig file to access the cluster and do any debugging.
```shell
kind get kubeconfig --name spegel-dev > kubeconfig
export KUBECONFIG=$(pwd)/kubeconfig
kubectl -n spegel get pods
```
## Generate Helm documentation
## Generating documentation
Changes to the Helm chart values will require the documentation to be regenerated.
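For reference, the regeneration command is the same Makefile target that the helm workflow above runs and that the dirty working tree check tells you to rerun:

```shell
make helm-docs
```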

View File

@ -1,6 +1,16 @@
FROM golang:1.21.7@sha256:549dd88a1a53715f177b41ab5fee25f7a376a6bb5322ac7abe263480d9554021 as builder
RUN mkdir /build
WORKDIR /build
COPY go.mod go.mod
COPY go.sum go.sum
RUN go mod download
COPY main.go main.go
COPY internal/ internal/
COPY pkg/ pkg/
RUN CGO_ENABLED=0 go build -installsuffix 'static' -o spegel .
FROM gcr.io/distroless/static:nonroot
ARG TARGETOS
ARG TARGETARCH
COPY ./dist/spegel_${TARGETOS}_${TARGETARCH}/spegel /
COPY --from=builder /build/spegel /app/
WORKDIR /app
USER root:root
ENTRYPOINT ["/spegel"]
ENTRYPOINT ["./spegel"]

View File

@ -1,29 +1,19 @@
TAG = $$(git rev-parse --short HEAD)
IMG_NAME ?= ghcr.io/spegel-org/spegel
IMG_REF = $(IMG_NAME):$(TAG)
E2E_PROXY_MODE ?= iptables
E2E_IP_FAMILY ?= ipv4
IMG ?= ghcr.io/spegel-org/spegel:$(TAG)
CNI ?= iptables
lint:
golangci-lint run ./...
build:
goreleaser build --snapshot --clean --single-target --skip before
build-image: build
docker build -t ${IMG_REF} .
test-unit:
.PHONY: test
test:
go test ./...
test-e2e: build-image
IMG_REF=${IMG_REF} \
E2E_PROXY_MODE=${E2E_PROXY_MODE} \
E2E_IP_FAMILY=${E2E_IP_FAMILY} \
go test ./test/e2e -v -timeout 200s -tags e2e -count 1 -run TestE2E
docker-build:
docker build -t ${IMG} .
dev-deploy: build-image
IMG_REF=${IMG_REF} go test ./test/e2e -v -timeout 200s -tags e2e -count 1 -run TestDevDeploy
e2e: docker-build
./test/e2e/e2e.sh ${IMG} ${CNI}
tools:
GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs

View File

@ -1,17 +1,14 @@
> [!NOTE]
> We've started hosting community meetings every Tuesday at 17:00 CET. Find out how to participate at https://spegel.dev/project/community/#meeting.
# Spegel
Spegel, mirror in Swedish, is a stateless cluster local OCI registry mirror.
<p align="center">
<img src="https://spegel.dev/images/overview.gif">
<img src="./assets/overview.gif">
</p>
## Features
## Use Cases
Spegel is for you if you are looking to do any of the following.
Spegel is for you if you are looking to do any of the following:
* Locally cache images from external registries with no explicit configuration.
* Avoid cluster failure during external registry downtime.
@ -20,13 +17,64 @@ Spegel is for you if you are looking to do any of the following.
* Decrease egress traffic outside of the cluster's network.
* Increase image pull efficiency in edge node deployments.
## Getting Started
## Background
Read the [getting started](https://spegel.dev/docs/getting-started/) guide to deploy Spegel.
Kubernetes does a great job at distributing workloads on multiple nodes, allowing node failures to occur without affecting uptime. A critical component for this to work is that each node has to be able to pull the workload images before they can start. Each replica running on a node will incur a pull operation. The images may be pulled from geographically close registries within the cloud provider, public registries, or self-hosted registries. This process has a flaw in that each node has to make this round trip separately. Why can't the nodes share the image among themselves?
## Contributing
Spegel enables each node in a Kubernetes cluster to act as a local registry mirror, allowing nodes to share images between themselves. Any image already pulled by a node will be available for any other node in the cluster to pull.
Read [contribution guidelines](./CONTRIBUTING.md) for instructions on how to build and test Spegel.
This has the benefit of reducing workload startup times and egress traffic as images will be stored locally within the cluster. On top of that it allows the scheduling of new workloads even when external registries are down.
## Installation
Before installing Spegel check the [compatibility guide](./docs/COMPATIBILITY.md) to make sure that it will work with your specific Kubernetes flavor. If everything checks out, the easiest method to deploy Spegel is with Helm.
```shell
helm upgrade --create-namespace --namespace spegel --install --version v0.0.21 spegel oci://ghcr.io/spegel-org/helm-charts/spegel
```
Refer to the [Helm Chart](./charts/spegel) for detailed configuration documentation.
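As an illustrative sketch only (not from the upstream documentation), individual chart values documented in the chart README can be overridden at install time with `--set`; the example below toggles `spegel.resolveLatestTag`, one of the values listed further down in this diff.

```shell
helm upgrade --create-namespace --namespace spegel --install \
  --version v0.0.21 \
  --set spegel.resolveLatestTag=false \
  spegel oci://ghcr.io/spegel-org/helm-charts/spegel
```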
## FAQ
Please consult the [FAQ](./docs/FAQ.md) if you run into any problems.
## Architecture
Spegel can run as a stateless application by exploiting the fact that an image pulled by a node is not immediately garbage collected. Spegel is deployed as a Daemonset on each node which acts as both the registry and mirror. Each instance is reachable both locally through a host port and a Service. This enables Containerd to be configured to use the localhost interface as a registry mirror and for Spegel instances to forward requests to each other.
<p align="center">
<img src="./assets/architecture.jpg">
</p>
Images are composed of multiple layers which are stored as individual files on the node disk. Each layer has a digest which is its identifier. Every node advertises the digests which are stored locally on disk. Kademlia is used to enable distributed advertisement and lookup of digests. An image pull consists of multiple HTTP requests, with one request per digest. When an image is pulled, the request is first sent to Spegel if it is configured to act as the mirror for the registry. Spegel will look up the digest within the cluster to see if any node has advertised that it has it. If a node is found, the request will be forwarded to that Spegel instance, which will serve the file with the specified digest. If no node is found, a 404 response is returned and Containerd will fall back to using the actual remote registry.
At its core, Spegel is a pull-only OCI registry which runs locally on every node in the Kubernetes cluster. Containerd is configured to use the local registry as a mirror, which serves the image from within the cluster or falls back to the source registry.
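As a rough sketch of what the mirror configuration described above looks like, the snippet below uses containerd's hosts.toml format, assuming docker.io as the upstream registry and the chart's default host port 30020; the exact files Spegel generates under the registry config path may differ.

```toml
# Illustrative only: /etc/containerd/certs.d/docker.io/hosts.toml,
# assuming the chart default service.registry.hostPort of 30020.
server = "https://registry-1.docker.io"

[host."http://127.0.0.1:30020"]
  capabilities = ["pull", "resolve"]
```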
## Alternatives
### Private Registry
A common practice, especially for larger enterprises, is to run a private registry like Harbor to replicate images from public registries, storing them within the private network close to the cluster.
This is a great option for those who have the time and budget to invest in running and managing the infrastructure. For others, it may be a good practice but unattainable in reality.
Spegel does not aim to replace projects like [Harbor](https://github.com/goharbor/harbor) or [Zot](https://github.com/project-zot/zot) but instead complements them. Having a persistent copy of public images stored geographically close to a cluster is great. Spegel will however enable
nodes to pull images from close by as long as the images are somewhere within the cluster. Additionally, there is no guarantee that a self-managed private registry is always available. In these scenarios
running Spegel is like wearing both a belt and suspenders.
### Dragonfly
[Dragonfly](https://github.com/dragonflyoss/Dragonfly2) is a great project that has been around for a while. In some aspects, Spegel takes inspiration from the work done by Dragonfly.
The difference is that Spegel aims to solve a smaller problem set. While it may mean fewer features it also means fewer moving components. Dragonfly requires both Redis and MySQL which
increases the resource consumption and burden on end users to manage additional resources. It also increases the risk of errors occurring during critical moments. The benefit of Spegel
is that it is stateless meaning that any temporary failure of nodes and communication should be easily resolved automatically.
### Kraken
[Kraken](https://github.com/uber/kraken) implements a similar solution to Spegel with its P2P agent component. It is however not heavily maintained, meaning that new features and security updates will not be added.
The problem set that Kraken is attempting to solve is however different from Spegel. It's focused on speeding up image distribution from registries serving thousands of large images. It does this by
having trackers and seeders distribute image layers through a BitTorrent-like method. This means that Kraken requires more moving components to function. Kraken also does not support using it
as a transparent pull-through mirror, meaning that any image that is supposed to be pulled through Kraken will require changing the registry URL in the image name. This has to be done for all
Pods in the cluster.
## Acknowledgements

BIN  assets/architecture.jpg Normal file (binary file not shown; 61 KiB)

1  assets/diagrams.drawio Normal file

File diff suppressed because one or more lines are too long

BIN  assets/overview.gif Normal file (binary file not shown; 190 KiB)

View File

@ -2,21 +2,61 @@
Stateless cluster local OCI registry mirror.
Read the [getting started](https://spegel.dev/docs/getting-started/) guide to deploy Spegel.
## Installation
Make sure that you have read the [compatibility guide](../../docs/COMPATIBILITY.md) before proceeding with the installation.
### CLI
Deploy Spegel with the Helm CLI.
```sh
helm upgrade --create-namespace --namespace spegel --install --version v0.0.21 spegel oci://ghcr.io/spegel-org/helm-charts/spegel
```
### Flux
Deploy Spegel with Flux.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: spegel
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
name: spegel
namespace: spegel
spec:
type: "oci"
interval: 5m0s
url: oci://ghcr.io/spegel-org/helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: spegel
namespace: spegel
spec:
interval: 1m
chart:
spec:
chart: spegel
version: "v0.0.21"
interval: 5m
sourceRef:
kind: HelmRepository
name: spegel
```
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity settings for pod assignment. |
| basicAuthSecretName | string | `""` | Name of secret containing basic authentication credentials for registry. |
| clusterDomain | string | `"cluster.local."` | Domain configured for service domain names. |
| commonLabels | object | `{}` | Common labels to apply to all rendered resources. |
| fullnameOverride | string | `""` | Overrides the full name of the chart. |
| grafanaDashboard.annotations | object | `{}` | Annotations that ConfigMaps can have to get configured in Grafana. See sidecar.dashboards.folderAnnotation for specifying the dashboard folder: https://github.com/grafana/helm-charts/tree/main/charts/grafana |
| grafanaDashboard.enabled | bool | `false` | If true creates a Grafana dashboard. |
| grafanaDashboard.sidecarLabel | string | `"grafana_dashboard"` | Label that ConfigMaps should have to be loaded as dashboards. |
| grafanaDashboard.sidecarLabelValue | string | `"1"` | Label value that ConfigMaps should have to be loaded as dashboards. |
| image.digest | string | `""` | Image digest. |
| image.pullPolicy | string | `"IfNotPresent"` | Image Pull Policy. |
| image.repository | string | `"ghcr.io/spegel-org/spegel"` | Image repository. |
@ -28,13 +68,10 @@ Read the [getting started](https://spegel.dev/docs/getting-started/) guide to de
| podAnnotations | object | `{}` | Annotations to add to the pod. |
| podSecurityContext | object | `{}` | Security context for the pod. |
| priorityClassName | string | `"system-node-critical"` | Priority class name to use for the pod. |
| resources | object | `{"limits":{"memory":"128Mi"},"requests":{"memory":"128Mi"}}` | Resource requests and limits for the Spegel container. |
| revisionHistoryLimit | int | `10` | The number of old history to retain to allow rollback. |
| securityContext | object | `{"readOnlyRootFilesystem":true}` | Security context for the Spegel container. |
| service.cleanup.port | int | `8080` | Port to expose cleanup probe on. |
| resources | object | `{}` | Resource requests and limits for the Spegel container. |
| securityContext | object | `{}` | Security context for the Spegel container. |
| service.metrics.port | int | `9090` | Port to expose the metrics via the service. |
| service.registry.hostPort | int | `30020` | Local host port to expose the registry. |
| service.registry.nodeIp | string | `""` | Override the NODE_IP environment variable. It defaults to the field status.hostIP. |
| service.registry.nodePort | int | `30021` | Node port to expose the registry via the service. |
| service.registry.port | int | `5000` | Port to expose the registry via the service. |
| service.registry.topologyAwareHintsEnabled | bool | `true` | If true adds topology aware hints annotation to node port service. |
@ -44,30 +81,20 @@ Read the [getting started](https://spegel.dev/docs/getting-started/) guide to de
| serviceMonitor.enabled | bool | `false` | If true creates a Prometheus Service Monitor. |
| serviceMonitor.interval | string | `"60s"` | Prometheus scrape interval. |
| serviceMonitor.labels | object | `{}` | Service monitor specific labels for prometheus to discover servicemonitor. |
| serviceMonitor.metricRelabelings | list | `[]` | List of relabeling rules to apply to the samples before ingestion. |
| serviceMonitor.relabelings | list | `[]` | List of relabeling rules to apply to the targets' metadata labels. |
| serviceMonitor.scrapeTimeout | string | `"30s"` | Prometheus scrape interval timeout. |
| spegel.additionalMirrorTargets | list | `[]` | Additional target mirror registries other than Spegel. |
| spegel.additionalMirrorRegistries | list | `[]` | Additional target mirror registries other than Spegel. |
| spegel.appendMirrors | bool | `false` | When true existing mirror configuration will be appended to instead of replaced. |
| spegel.blobSpeed | string | `""` | Maximum write speed per request when serving blob layers. Should be an integer followed by unit Bps, KBps, MBps, GBps, or TBps. |
| spegel.containerdContentPath | string | `"/var/lib/containerd/io.containerd.content.v1.content"` | Path to Containerd content store. |
| spegel.containerdMirrorAdd | bool | `true` | If true Spegel will add mirror configuration to the node. |
| spegel.containerdNamespace | string | `"k8s.io"` | Containerd namespace where images are stored. |
| spegel.containerdRegistryConfigPath | string | `"/etc/containerd/certs.d"` | Path to Containerd mirror configuration. |
| spegel.containerdSock | string | `"/run/containerd/containerd.sock"` | Path to Containerd socket. |
| spegel.debugWebEnabled | bool | `false` | When true enables debug web page. |
| spegel.logLevel | string | `"INFO"` | Minimum log level to output. Value should be DEBUG, INFO, WARN, or ERROR. |
| spegel.mirrorResolveRetries | int | `3` | Max amount of mirrors to attempt. |
| spegel.mirrorResolveTimeout | string | `"20ms"` | Max duration spent finding a mirror. |
| spegel.mirroredRegistries | list | `[]` | Registries for which mirror configuration will be created. Empty means all registries will be mirrored. |
| spegel.prependExisting | bool | `false` | When true existing mirror configuration will be kept and Spegel will prepend its configuration. |
| spegel.kubeconfigPath | string | `""` | Path to Kubeconfig credentials, should only be set if Spegel is run in an environment without RBAC. |
| spegel.mirrorResolveRetries | int | `3` | Max amount of mirrors to attempt. |
| spegel.mirrorResolveTimeout | string | `"5s"` | Max duration spent finding a mirror. |
| spegel.registries | list | `["https://cgr.dev","https://docker.io","https://ghcr.io","https://quay.io","https://mcr.microsoft.com","https://public.ecr.aws","https://gcr.io","https://registry.k8s.io","https://k8s.gcr.io","https://lscr.io"]` | Registries for which mirror configuration will be created. |
| spegel.resolveLatestTag | bool | `true` | When true latest tags will be resolved to digests. |
| spegel.resolveTags | bool | `true` | When true Spegel will resolve tags to digests. |
| tolerations | list | `[{"key":"CriticalAddonsOnly","operator":"Exists"},{"effect":"NoExecute","operator":"Exists"},{"effect":"NoSchedule","operator":"Exists"}]` | Tolerations for pod assignment. |
| updateStrategy | object | `{}` | An update strategy to replace existing pods with new pods. |
| verticalPodAutoscaler.controlledResources | list | `[]` | List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory |
| verticalPodAutoscaler.controlledValues | string | `"RequestsAndLimits"` | Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. |
| verticalPodAutoscaler.enabled | bool | `false` | If true creates a Vertical Pod Autoscaler. |
| verticalPodAutoscaler.maxAllowed | object | `{}` | Define the max allowed resources for the pod |
| verticalPodAutoscaler.minAllowed | object | `{}` | Define the min allowed resources for the pod |
| verticalPodAutoscaler.recommenders | list | `[]` | Recommender responsible for generating recommendation for the object. List should be empty (then the default recommender will generate the recommendation) or contain exactly one recommender. |
| verticalPodAutoscaler.updatePolicy.minReplicas | int | `2` | Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction |
| verticalPodAutoscaler.updatePolicy.updateMode | string | `"Auto"` | Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". |
| updateStrategy | object | `{}` | An update strategy to replace existing pods with new pods. |

View File

@ -2,6 +2,54 @@
{{ template "chart.description" . }}
Read the [getting started](https://spegel.dev/docs/getting-started/) guide to deploy Spegel.
## Installation
{{ template "chart.valuesSection" . }}
Make sure that you have read the [compatibility guide](../../docs/COMPATIBILITY.md) before proceeding with the installation.
### CLI
Deploy Spegel with the Helm CLI.
```sh
helm upgrade --create-namespace --namespace spegel --install --version v0.0.21 spegel oci://ghcr.io/spegel-org/helm-charts/spegel
```
### Flux
Deploy Spegel with Flux.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: spegel
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
name: spegel
namespace: spegel
spec:
type: "oci"
interval: 5m0s
url: oci://ghcr.io/spegel-org/helm-charts
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: spegel
namespace: spegel
spec:
interval: 1m
chart:
spec:
chart: spegel
version: "v0.0.21"
interval: 5m
sourceRef:
kind: HelmRepository
name: spegel
```
{{ template "chart.valuesSection" . }}

File diff suppressed because it is too large.

View File

@ -24,7 +24,7 @@ If release name contains chart name it will be used as a full name.
{{- end }}
{{/*
Creates the namespace for the chart.
Creates the namespace for the chart.
Defaults to the Release namespace unless the namespaceOverride is defined.
*/}}
{{- define "spegel.namespace" -}}
@ -53,13 +53,6 @@ helm.sh/chart: {{ include "spegel.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.commonLabels }}
{{ toYaml . }}
{{- end }}
{{- end }}
{{/*
{{- end }}
{{- end }}
{{/*
@ -87,16 +80,3 @@ Image reference
{{- .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
{{- end }}
{{- end }}
{{/*
Host networking
*/}}
{{- define "networking.nodeIp" -}}
{{- if .Values.service.registry.nodeIp -}}
value: {{ .Values.service.registry.nodeIp }}
{{- else -}}
valueFrom:
fieldRef:
fieldPath: status.hostIP
{{- end -}}
{{- end -}}

View File

@ -6,7 +6,6 @@ metadata:
labels:
{{- include "spegel.labels" . | nindent 4 }}
spec:
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
updateStrategy:
{{- toYaml .Values.updateStrategy | nindent 4 }}
selector:
@ -20,9 +19,6 @@ spec:
{{- end }}
labels:
{{- include "spegel.selectorLabels" . | nindent 8 }}
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@ -41,37 +37,31 @@ spec:
{{- toYaml .Values.securityContext | nindent 12 }}
args:
- configuration
- --log-level={{ .Values.spegel.logLevel }}
- --containerd-registry-config-path={{ .Values.spegel.containerdRegistryConfigPath }}
{{- with .Values.spegel.mirroredRegistries }}
- --mirrored-registries
{{- with .Values.spegel.registries }}
- --registries
{{- range . }}
- {{ . | quote }}
{{- end }}
{{- end }}
- --mirror-targets
- --mirror-registries
- http://$(NODE_IP):{{ .Values.service.registry.hostPort }}
- http://$(NODE_IP):{{ .Values.service.registry.nodePort }}
{{- with .Values.spegel.additionalMirrorTargets }}
{{- with .Values.spegel.additionalMirrorRegistries }}
{{- range . }}
- {{ . | quote }}
{{- end }}
{{- end }}
- --resolve-tags={{ .Values.spegel.resolveTags }}
- --prepend-existing={{ .Values.spegel.prependExisting }}
- --append-mirrors={{ .Values.spegel.appendMirrors }}
env:
- name: NODE_IP
{{- include "networking.nodeIp" . | nindent 10 }}
resources:
{{- toYaml .Values.resources | nindent 10 }}
valueFrom:
fieldRef:
fieldPath: status.hostIP
volumeMounts:
- name: containerd-config
mountPath: {{ .Values.spegel.containerdRegistryConfigPath }}
{{- if .Values.basicAuthSecretName }}
- name: basic-auth
mountPath: "/etc/secrets/basic-auth"
readOnly: true
{{- end }}
{{- end }}
containers:
- name: registry
@ -81,14 +71,13 @@ spec:
{{- toYaml .Values.securityContext | nindent 12 }}
args:
- registry
- --log-level={{ .Values.spegel.logLevel }}
- --mirror-resolve-retries={{ .Values.spegel.mirrorResolveRetries }}
- --mirror-resolve-timeout={{ .Values.spegel.mirrorResolveTimeout }}
- --registry-addr=:{{ .Values.service.registry.port }}
- --router-addr=:{{ .Values.service.router.port }}
- --metrics-addr=:{{ .Values.service.metrics.port }}
{{- with .Values.spegel.mirroredRegistries }}
- --mirrored-registries
{{- with .Values.spegel.registries }}
- --registries
{{- range . }}
- {{ . | quote }}
{{- end }}
@ -96,32 +85,25 @@ spec:
- --containerd-sock={{ .Values.spegel.containerdSock }}
- --containerd-namespace={{ .Values.spegel.containerdNamespace }}
- --containerd-registry-config-path={{ .Values.spegel.containerdRegistryConfigPath }}
- --bootstrap-kind=dns
- --dns-bootstrap-domain={{ include "spegel.fullname" . }}-bootstrap.{{ include "spegel.namespace" . }}.svc.{{ .Values.clusterDomain }}
- --bootstrap-kind=kubernetes
{{- with .Values.spegel.kubeconfigPath }}
- --kubeconfig-path={{ . }}
{{- end }}
- --leader-election-namespace={{ include "spegel.namespace" . }}
- --leader-election-name={{ .Release.Name }}-leader-election
- --resolve-latest-tag={{ .Values.spegel.resolveLatestTag }}
- --local-addr=$(NODE_IP):{{ .Values.service.registry.hostPort }}
{{- with .Values.spegel.blobSpeed }}
- --blob-speed={{ . }}
{{- end }}
{{- with .Values.spegel.containerdContentPath }}
- --containerd-content-path={{ . }}
{{- end }}
- --debug-web-enabled={{ .Values.spegel.debugWebEnabled }}
env:
- name: DATA_DIR
value: ""
{{- if ((.Values.resources).limits).cpu }}
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: 1
{{- end }}
{{- if ((.Values.resources).limits).memory }}
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: 1
{{- end }}
- name: NODE_IP
{{- include "networking.nodeIp" . | nindent 10 }}
valueFrom:
fieldRef:
fieldPath: status.hostIP
ports:
- name: registry
containerPort: {{ .Values.service.registry.port }}
@ -136,7 +118,7 @@ spec:
# Startup may take a bit longer on bootstrap as Pods need to find each other.
# This is why the startup probe is a bit more forgiving, while hitting the endpoint more often.
startupProbe:
periodSeconds: 3
periodSeconds: 1
failureThreshold: 60
httpGet:
path: /healthz
@ -146,11 +128,6 @@ spec:
path: /healthz
port: registry
volumeMounts:
{{- if .Values.basicAuthSecretName }}
- name: basic-auth
mountPath: "/etc/secrets/basic-auth"
readOnly: true
{{- end }}
- name: containerd-sock
mountPath: {{ .Values.spegel.containerdSock }}
{{- with .Values.spegel.containerdContentPath }}
@ -161,15 +138,9 @@ spec:
resources:
{{- toYaml .Values.resources | nindent 10 }}
volumes:
{{- with .Values.basicAuthSecretName }}
- name: basic-auth
secret:
secretName: {{ . }}
{{- end }}
- name: containerd-sock
hostPath:
path: {{ .Values.spegel.containerdSock }}
type: Socket
{{- with .Values.spegel.containerdContentPath }}
- name: containerd-content
hostPath:

View File

@ -1,17 +0,0 @@
{{- if .Values.grafanaDashboard.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "spegel.fullname" . }}-dashboard
namespace: {{ include "spegel.namespace" . }}
labels:
{{ .Values.grafanaDashboard.sidecarLabel }}: {{ .Values.grafanaDashboard.sidecarLabelValue | quote }}
{{- include "spegel.labels" . | nindent 4 }}
{{- with .Values.grafanaDashboard.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
data:
spegel.json: |-
{{ .Files.Get "monitoring/grafana-dashboard.json" | indent 6 }}
{{- end }}

View File

@ -1,106 +0,0 @@
{{- if .Values.spegel.containerdMirrorAdd }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "spegel.fullname" . }}-cleanup
namespace: {{ include "spegel.namespace" . }}
labels:
app.kubernetes.io/component: cleanup
{{- include "spegel.labels" . | nindent 4 }}
annotations:
helm.sh/hook: "post-delete"
helm.sh/hook-delete-policy: "before-hook-creation, hook-succeeded"
helm.sh/hook-weight: "0"
spec:
selector:
matchLabels:
app.kubernetes.io/component: cleanup
{{- include "spegel.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
app.kubernetes.io/component: cleanup
{{- include "spegel.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
priorityClassName: {{ .Values.priorityClassName }}
containers:
- name: cleanup
image: "{{ include "spegel.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- cleanup
- --containerd-registry-config-path={{ .Values.spegel.containerdRegistryConfigPath }}
- --addr=:{{ .Values.service.cleanup.port }}
readinessProbe:
httpGet:
path: /healthz
port: readiness
ports:
- name: readiness
containerPort: {{ .Values.service.cleanup.port }}
protocol: TCP
volumeMounts:
- name: containerd-config
mountPath: {{ .Values.spegel.containerdRegistryConfigPath }}
volumes:
- name: containerd-config
hostPath:
path: {{ .Values.spegel.containerdRegistryConfigPath }}
type: DirectoryOrCreate
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "spegel.fullname" . }}-cleanup
namespace: {{ include "spegel.namespace" . }}
labels:
app.kubernetes.io/component: cleanup
{{- include "spegel.labels" . | nindent 4 }}
annotations:
helm.sh/hook: "post-delete"
helm.sh/hook-delete-policy: "before-hook-creation, hook-succeeded"
helm.sh/hook-weight: "0"
spec:
selector:
app.kubernetes.io/component: cleanup
{{- include "spegel.selectorLabels" . | nindent 4 }}
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: readiness
port: {{ .Values.service.cleanup.port }}
protocol: TCP
---
apiVersion: v1
kind: Pod
metadata:
name: {{ include "spegel.fullname" . }}-cleanup-wait
namespace: {{ include "spegel.namespace" . }}
labels:
app.kubernetes.io/component: cleanup-wait
{{- include "spegel.labels" . | nindent 4 }}
annotations:
helm.sh/hook: "post-delete"
helm.sh/hook-delete-policy: "before-hook-creation, hook-succeeded"
helm.sh/hook-weight: "1"
spec:
containers:
- name: cleanup-wait
image: "{{ include "spegel.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- cleanup-wait
- --probe-endpoint={{ include "spegel.fullname" . }}-cleanup.{{ include "spegel.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.cleanup.port }}
restartPolicy: Never
terminationGracePeriodSeconds: 0
{{- end }}

View File

@ -9,3 +9,31 @@ metadata:
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "spegel.fullname" . }}
namespace: {{ include "spegel.namespace" . }}
labels:
{{- include "spegel.labels" . | nindent 4 }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "list", "watch", "create", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "spegel.fullname" . }}
namespace: {{ include "spegel.namespace" . }}
labels:
{{- include "spegel.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "spegel.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "spegel.serviceAccountName" . }}
namespace: {{ include "spegel.namespace" . }}

View File

@ -24,7 +24,7 @@ metadata:
{{- include "spegel.labels" . | nindent 4 }}
{{- if .Values.service.registry.topologyAwareHintsEnabled }}
annotations:
service.kubernetes.io/topology-mode: "auto"
service.kubernetes.io/topology-aware-hints: auto
{{- end }}
spec:
type: NodePort
@ -36,20 +36,3 @@ spec:
targetPort: registry
nodePort: {{ .Values.service.registry.nodePort }}
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "spegel.fullname" . }}-bootstrap
namespace: {{ include "spegel.namespace" . }}
labels:
{{- include "spegel.labels" . | nindent 4 }}
spec:
selector:
{{- include "spegel.selectorLabels" . | nindent 4 }}
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: router
port: {{ .Values.service.router.port }}
protocol: TCP

View File

@ -18,12 +18,4 @@ spec:
- port: metrics
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -1,40 +0,0 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "spegel.fullname" . }}
namespace: {{ include "spegel.namespace" . }}
labels:
{{- include "spegel.labels" . | nindent 4 }}
spec:
{{- with .Values.verticalPodAutoscaler.recommenders }}
recommenders:
{{- toYaml . | nindent 4 }}
{{- end }}
resourcePolicy:
containerPolicies:
- containerName: registry
{{- with .Values.verticalPodAutoscaler.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.verticalPodAutoscaler.controlledValues }}
controlledValues: {{ .Values.verticalPodAutoscaler.controlledValues }}
{{- end }}
{{- if .Values.verticalPodAutoscaler.maxAllowed }}
maxAllowed:
{{- toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }}
{{- end }}
{{- if .Values.verticalPodAutoscaler.minAllowed }}
minAllowed:
{{- toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: DaemonSet
name: {{ include "spegel.fullname" . }}
{{- with .Values.verticalPodAutoscaler.updatePolicy }}
updatePolicy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -8,6 +8,7 @@ image:
# -- Image digest.
digest: ""
# -- Image Pull Secrets
imagePullSecrets: []
# -- Overrides the name of the chart.
@ -31,17 +32,17 @@ podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
# -- The number of old history to retain to allow rollback.
revisionHistoryLimit: 10
# -- Security context for the Spegel container.
securityContext:
readOnlyRootFilesystem: true
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
registry:
# -- Override the NODE_IP environment variable. It defaults to the field status.hostIP
nodeIp: ""
# -- Port to expose the registry via the service.
port: 5000
# -- Node port to expose the registry via the service.
@ -56,16 +57,19 @@ service:
metrics:
# -- Port to expose the metrics via the service.
port: 9090
cleanup:
# -- Port to expose cleanup probe on.
port: 8080
# -- Resource requests and limits for the Spegel container.
resources:
requests:
memory: 128Mi
limits:
memory: 128Mi
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- Node selector for pod assignment.
nodeSelector:
@ -90,12 +94,6 @@ tolerations:
# -- Affinity settings for pod assignment.
affinity: {}
# -- Common labels to apply to all rendered resources.
commonLabels: {}
# -- Domain configured for service domain names.
clusterDomain: cluster.local.
serviceMonitor:
# -- If true creates a Prometheus Service Monitor.
enabled: false
@ -105,42 +103,29 @@ serviceMonitor:
scrapeTimeout: 30s
# -- Service monitor specific labels for prometheus to discover servicemonitor.
labels: {}
# -- List of relabeling rules to apply the targets metadata labels.
relabelings: []
# -- List of relabeling rules to apply to the samples before ingestion.
metricRelabelings: []
grafanaDashboard:
# -- If true creates a Grafana dashboard.
enabled: false
# -- Label that ConfigMaps should have to be loaded as dashboards.
sidecarLabel: "grafana_dashboard"
# -- Label value that ConfigMaps should have to be loaded as dashboards.
sidecarLabelValue: "1"
# -- Annotations that ConfigMaps can have to get configured in Grafana,
# See: sidecar.dashboards.folderAnnotation for specifying the dashboard folder.
# https://github.com/grafana/helm-charts/tree/main/charts/grafana
annotations: {}
# -- Priority class name to use for the pod.
priorityClassName: system-node-critical
# -- Name of secret containing basic authentication credentials for registry.
basicAuthSecretName: ""
spegel:
# -- Minimum log level to output. Value should be DEBUG, INFO, WARN, or ERROR.
logLevel: "INFO"
# -- Registries for which mirror configuration will be created. Empty means all registries will be mirrored.
mirroredRegistries: []
# - https://docker.io
# - https://ghcr.io
# -- Registries for which mirror configuration will be created.
registries:
- https://cgr.dev
- https://docker.io
- https://ghcr.io
- https://quay.io
- https://mcr.microsoft.com
- https://public.ecr.aws
- https://gcr.io
- https://registry.k8s.io
- https://k8s.gcr.io
- https://lscr.io
# -- Additional target mirror registries other than Spegel.
additionalMirrorTargets: []
# -- Max amount of mirrors to attempt.
additionalMirrorRegistries: []
# -- Max amount of mirrors to attempt.
mirrorResolveRetries: 3
# -- Max duration spent finding a mirror.
mirrorResolveTimeout: "20ms"
mirrorResolveTimeout: "5s"
# -- Path to Containerd socket.
containerdSock: "/run/containerd/containerd.sock"
# -- Containerd namespace where images are stored.
@ -151,43 +136,13 @@ spegel:
containerdContentPath: "/var/lib/containerd/io.containerd.content.v1.content"
# -- If true Spegel will add mirror configuration to the node.
containerdMirrorAdd: true
# -- Path to Kubeconfig credentials, should only be set if Spegel is run in an environment without RBAC.
kubeconfigPath: ""
# -- When true Spegel will resolve tags to digests.
resolveTags: true
# -- When true latest tags will be resolved to digests.
resolveLatestTag: true
# -- When true existing mirror configuration will be kept and Spegel will prepend its configuration.
prependExisting: false
# -- When true enables debug web page.
debugWebEnabled: false
verticalPodAutoscaler:
# -- If true creates a Vertical Pod Autoscaler.
enabled: false
# -- Recommender responsible for generating recommendation for the object.
# List should be empty (then the default recommender will generate the recommendation)
# or contain exactly one recommender.
recommenders: []
# - name: custom-recommender-performance
# -- List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# -- Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
controlledValues: RequestsAndLimits
# -- Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 100m
# memory: 128Mi
# -- Define the min allowed resources for the pod
minAllowed: {}
# cpu: 100m
# memory: 128Mi
updatePolicy:
# -- Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
minReplicas: 2
# -- Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
# -- Maximum write speed per request when serving blob layers. Should be an integer followed by unit Bps, KBps, MBps, GBps, or TBps.
blobSpeed: ""
# -- When true existing mirror configuration will be appended to instead of replaced.
appendMirrors: false

77
docs/COMPATIBILITY.md Normal file
View File

@ -0,0 +1,77 @@
# Compatibility
Currently, Spegel only works with Containerd; support for other container runtimes may be added in the future. Spegel relies on [Containerd registry mirroring](https://github.com/containerd/containerd/blob/main/docs/hosts.md#cri) to route requests to the correct destination.
This requires Containerd to be properly configured; if it is not, Spegel will exit with an error. First, the registry config path needs to be set, which is not done by default in Containerd. Second, discarding of unpacked layers must not be enabled.
Some Kubernetes flavors come with these settings out of the box, while others do not. Spegel is not able to write this configuration for you as it requires a restart of Containerd to take effect.
```toml
version = 2
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = false
```
# Kubernetes
Spegel has been tested for compatibility with the following Kubernetes distributions. A green status means Spegel will work out of the box, yellow means additional configuration is required, and red means that Spegel will not work.
| Status | Distribution |
| --- | --- |
| :green_circle: | AKS |
| :green_circle: | Minikube |
| :yellow_circle: | EKS |
| :yellow_circle: | K3S |
| :yellow_circle: | Talos |
| :red_circle: | GKE |
| :red_circle: | DigitalOcean |
## EKS
Discarding of unpacked layers is enabled by default, meaning that layers which are not required by the container runtime will be removed after being consumed.
This needs to be disabled, as otherwise not all of the required layers of an image will be present on the node.
The best way to change Containerd settings in EKS is to add the configuration to the import directory using a custom node bootstrap script.
```shell
#!/bin/bash
set -ex
mkdir -p /etc/containerd/config.d
cat > /etc/containerd/config.d/spegel.toml << EOL
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".containerd]
discard_unpacked_layers = false
EOL
/etc/eks/bootstrap.sh
```
## K3S
K3S embeds Spegel; refer to the K3S [documentation](https://docs.k3s.io/installation/registry-mirror?_highlight=spegel) for deployment information.
## Talos
Talos comes with Pod Security Admission [pre-configured](https://www.talos.dev/latest/kubernetes-guides/configuration/pod-security/). The default profile is too restrictive and needs to be changed to privileged.
```shell
kubectl label namespace spegel pod-security.kubernetes.io/enforce=privileged
```
Talos also uses a different path for its Containerd registry config.
```yaml
spegel:
containerdRegistryConfigPath: /etc/cri/conf.d/hosts
```
## GKE
GKE does not set the registry config path in its Containerd configuration. On top of that, it uses the old mirror configuration format for its internal mirroring service.
## DigitalOcean
DigitalOcean does not set the registry config path in its Containerd configuration.

114
docs/FAQ.md Normal file
View File

@ -0,0 +1,114 @@
# FAQ
Frequently asked questions; please read these before creating a new issue.
## Can I use Spegel in production?
Spegel has been used by multiple users in production for over a year without any major issues. The great thing is that pulling images will not stop working even if you, for some reason, find an issue with Spegel.
A fallback to the original registry will always occur if Spegel cannot be reached or cannot serve the requested image.
## How do I know that Spegel is working?
Spegel is meant to be a painless experience to install, which means it may initially be difficult to know whether things are working or not. Simply put, a good indicator that things are working is that all Spegel pods have started and are in a ready state.
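A quick way to check this, assuming Spegel was installed into the `spegel` namespace (adjust the namespace if your installation differs):
```shell
# All Spegel pods should be Running and report Ready.
kubectl --namespace spegel get pods
```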
Spegel does a couple of checks on startup to verify that the required configuration is correct; if it is not, it will exit with an error. While it runs it will log all received requests, both those it mirrors and those it serves.
An incoming request to Spegel that is mirrored will produce a log entry like the following.
```
{"level":"info","ts":1692304805.9038486,"caller":"gin@v0.0.9/logger.go:53","msg":"","path":"/v2/library/nginx/blobs/sha256:1cb127bd932119089b5ffb612ffa84537ddd1318e6784f2fce80916bbb8bd166","status":200,"method":"GET","latency":0.005075836,"ip":"172.18.0.5","handler":"mirror"}
```
While the Spegel instance serving the request on the other end will log the following.
```
{"level":"info","ts":1692304805.9035861,"caller":"gin@v0.0.9/logger.go:53","msg":"","path":"/v2/library/nginx/blobs/sha256:1cb127bd932119089b5ffb612ffa84537ddd1318e6784f2fce80916bbb8bd166","status":200,"method":"GET","latency":0.003644997,"ip":"172.18.0.5","handler":"blob"}
```
## Why am I not able to pull the new version of my tagged image?
Reusing the same tag multiple times for different versions of an image is generally a bad idea. The most common scenario is the use of the `latest` tag. This makes it difficult to determine which version of the image is being used. On top of that, the image will not be updated if it is already cached on the node.
Some people have chosen to keep reusing tags and have instead set the image pull policy to `Always`, forcing the image to be pulled every time a pod is started. This will however not work with Spegel, as the tag could be resolved by another node in the cluster, resulting in the same "old" image being pulled.
There are two solutions to work around this problem, allowing users to keep the workflow they had before adopting Spegel.
The preferred solution is to deploy [k8s-digester](https://github.com/google/k8s-digester) alongside Spegel. This will allow you to enjoy all the benefits of Spegel while continuously updating image tag versions. The way it works is that k8s-digester will, for each pod created, resolve tags to image digests and add the digests to the image references.
All pods that originally reference images by tag will instead do so by digest, and k8s-digester will resolve the new digest for a tag whenever a new version is pushed to the registry. Using k8s-digester means that tags will be kept up to date while Spegel is used to distribute the layers between nodes. It also means that Spegel will be able
to continue distributing images if the external registry becomes unavailable. The reason this works is that the mutating webhook is configured to ignore errors, in which case Spegel will instead be used to resolve the tag to a digest.
One caveat when deploying k8s-digester is that it will by default modify not only pods but also any parent resource that creates pods. This in turn has the side effect of only setting the
digest once, when the parent resource is created, and never again. For that reason it is a good idea to modify the mutating webhook to only include pods; that way the digest will be
updated every time a new pod is created.
```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: digester-mutating-webhook-configuration
labels:
control-plane: controller-manager
digester/operation: webhook
digester/system: "yes"
webhooks:
- name: digester-webhook-service.digester-system.svc
admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: digester-webhook-service
namespace: digester-system
path: /v1/mutate
caBundle: Cg==
failurePolicy: Ignore # kpt-set: ${failure-policy}
namespaceSelector:
matchLabels:
digest-resolution: enabled
reinvocationPolicy: IfNeeded
rules:
- resources:
- pods
apiGroups:
- ''
apiVersions:
- v1
operations:
- CREATE
- UPDATE
scope: Namespaced
sideEffects: None
timeoutSeconds: 15
```
The second option, which should be used only if deploying k8s-digester is not possible, is to disable tag resolving in Spegel altogether. It can either be disabled only for `latest` tags or for all tags.
This is done by changing the Helm chart values from their defaults.
```yaml
spegel:
resolveTags: false
resolveLatestTag: false
```
Please note that this does, however, remove Spegel's ability to protect against registry outages for any images referenced by tags.
## Why am I able to pull private images without image pull secrets?
An image pulled by a Kubernetes node is cached locally on disk, meaning that other pods running on the same node that require the same image do not have to pull it again. Spegel relies on this mechanism to be able to distribute images.
This may however not be a desirable feature when running a multi-tenant cluster where private images are pulled using credentials. In this scenario, the expectation is that only pods with the correct credentials should be able to use the image.
Ownership of private images has been an issue for a long time in Kubernetes as indicated by the unresolved issue https://github.com/kubernetes/kubernetes/issues/18787 created back in 2015. The short answer is that a good solution does not exist, with or without Spegel.
The current [suggested solution](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) is to enforce an `Always` image pull policy for private images that require authentication. Doing so will force a request to the registry to
validate the digest or resolve the tag. This request will only succeed with the proper authentication. This is a mediocre solution at best, as it creates a hard dependency on the external registry, meaning the pod will not be able to start even if the image is cached on the node.
This solution does however not work when using Spegel; instead, Spegel may make the problem worse. Without Spegel, a pod that wants to use a private image it does not have access to would have to be scheduled on a node that has already pulled the image.
With Spegel that image will be available to all nodes in the cluster. Currently, a good solution for Spegel does not exist. There are two reasons for this. The first is that credentials are not included when pulling an image from a registry mirror, which is a deliberate choice, as doing so would mean sharing credentials with third parties.
Additionally, Spegel would have no method of validating the credentials even if they were included in the requests. So for the time being, if you have these types of requirements, Spegel may not be the right choice for you.
## How do I use Spegel in conjunction with another registry cache?
Spegel can be used with other registry caches in cases where the best-effort caching offered by Spegel is not enough. In these situations, if the image is not cached within the cluster, it should be pulled from the secondary cache.
This is configured by adding the domain of the registry to the `additionalMirrorRegistries` list in the Helm values. Registries added to this list will be included in the mirror configuration created by Spegel.
```yaml
spegel:
additionalMirrorRegistries:
- https://zot.example.com
```

13
docs/METRICS.md Normal file
View File

@ -0,0 +1,13 @@
# Metrics
| Name | Type | Labels |
| ---------- | ----------- | ----------- |
| spegel_advertised_images | Gauge | `registry` |
| spegel_resolve_duration_seconds | Histogram | `router` |
| spegel_advertised_keys | Gauge | `registry` |
| spegel_advertised_image_tags | Gauge | `registry` |
| spegel_advertised_image_digests | Gauge | `registry` |
| spegel_mirror_requests_total | Counter | `registry` <br/> `cache=hit\|miss` <br/> `source=internal\|external` |
| http_request_duration_seconds | Histogram | `handler` <br/> `method` <br/> `code` |
| http_response_size_bytes | Histogram | `handler` <br/> `method` <br/> `code` |
| http_requests_inflight | Gauge | `handler` |

262
go.mod
View File

@ -1,109 +1,120 @@
module github.com/spegel-org/spegel
go 1.24.0
toolchain go1.24.3
go 1.21
require (
cuelabs.dev/go/oci/ociregistry v0.0.0-20250530080122-d0efc28a5723
github.com/alexflint/go-arg v1.5.1
github.com/containerd/containerd/api v1.9.0
github.com/containerd/containerd/v2 v2.1.1
github.com/containerd/errdefs v1.0.0
github.com/containerd/typeurl/v2 v2.2.3
github.com/go-logr/logr v1.4.3
github.com/ipfs/go-cid v0.5.0
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-kad-dht v0.33.1
github.com/multiformats/go-multiaddr v0.16.0
github.com/multiformats/go-multicodec v0.9.1
github.com/alexflint/go-arg v1.4.3
github.com/containerd/containerd v1.7.15
github.com/containerd/typeurl/v2 v2.1.1
github.com/go-logr/logr v1.4.1
github.com/go-logr/zapr v1.3.0
github.com/ipfs/go-cid v0.4.1
github.com/libp2p/go-libp2p v0.33.2
github.com/libp2p/go-libp2p-kad-dht v0.25.2
github.com/multiformats/go-multiaddr v0.12.3
github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/norwoodj/helm-docs v1.12.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
github.com/pelletier/go-toml/v2 v2.2.4
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.64.0
github.com/stretchr/testify v1.10.0
go.etcd.io/bbolt v1.4.1
golang.org/x/sync v0.15.0
google.golang.org/grpc v1.73.0
k8s.io/apimachinery v0.33.1
k8s.io/cri-api v0.33.1
k8s.io/klog/v2 v2.130.1
github.com/opencontainers/image-spec v1.1.0
github.com/pelletier/go-toml/v2 v2.2.0
github.com/prometheus/client_golang v1.19.0
github.com/spf13/afero v1.11.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.9
go.uber.org/zap v1.27.0
golang.org/x/sync v0.7.0
golang.org/x/time v0.5.0
k8s.io/client-go v0.28.8
k8s.io/cri-api v0.28.8
k8s.io/klog/v2 v2.100.1
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.1 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.13.0 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/alexflint/go-scalar v1.2.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/continuity v0.4.2 // indirect
github.com/containerd/fifo v1.1.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v1.0.0-rc.1 // indirect
github.com/containerd/plugin v1.0.0 // indirect
github.com/containerd/ttrpc v1.2.7 // indirect
github.com/containerd/ttrpc v1.2.3 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/boxo v0.30.0 // indirect
github.com/ipfs/go-datastore v0.8.2 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipfs/boxo v0.10.0 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/ipld/go-ipld-prime v0.20.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.6 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.7.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
github.com/libp2p/go-libp2p-record v0.2.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
@ -111,90 +122,81 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/signal v0.7.1 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multistream v0.6.0 // indirect
github.com/multiformats/go-multistream v0.5.0 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/norwoodj/helm-docs v1.14.2 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.12.0 // indirect
github.com/onsi/ginkgo/v2 v2.15.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/ice/v4 v4.0.8 // indirect
github.com/pion/interceptor v0.1.39 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.18 // indirect
github.com/pion/sctp v1.8.37 // indirect
github.com/pion/sdp/v3 v3.0.10 // indirect
github.com/pion/srtp/v3 v3.0.4 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.0 // indirect
github.com/pion/webrtc/v4 v4.0.10 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/quic-go v0.42.0 // indirect
github.com/quic-go/webtransport-go v0.6.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.16.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/fx v1.20.1 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/tools v0.33.0 // indirect
gonum.org/v1/gonum v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
google.golang.org/protobuf v1.36.6 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.18.0 // indirect
gonum.org/v1/gonum v0.13.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
helm.sh/helm/v3 v3.17.3 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
k8s.io/api v0.28.8 // indirect
k8s.io/apimachinery v0.28.8 // indirect
k8s.io/helm v2.17.0+incompatible // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
tool github.com/norwoodj/helm-docs/cmd/helm-docs

2104
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -1,145 +0,0 @@
package cleanup
import (
"context"
"errors"
"net"
"net/http"
"net/url"
"time"
"github.com/go-logr/logr"
"golang.org/x/sync/errgroup"
"github.com/spegel-org/spegel/internal/channel"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/spegel-org/spegel/pkg/oci"
)
func Run(ctx context.Context, addr, configPath string) error {
log := logr.FromContextOrDiscard(ctx)
err := oci.CleanupMirrorConfiguration(ctx, configPath)
if err != nil {
return err
}
g, gCtx := errgroup.WithContext(ctx)
mux := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodGet && req.URL.Path != "/healthz" {
log.Error(errors.New("unknown request"), "unsupported probe request", "path", req.URL.Path, "method", req.Method)
rw.WriteHeader(http.StatusNotFound)
return
}
rw.WriteHeader(http.StatusOK)
})
srv := &http.Server{
Addr: addr,
Handler: mux,
}
g.Go(func() error {
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
return err
}
return nil
})
g.Go(func() error {
<-gCtx.Done()
shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
return srv.Shutdown(shutdownCtx)
})
log.Info("waiting to be shutdown")
err = g.Wait()
if err != nil {
return err
}
return nil
}
func Wait(ctx context.Context, probeEndpoint string, period time.Duration, threshold int) error {
log := logr.FromContextOrDiscard(ctx)
resolver := &net.Resolver{}
httpClient := httpx.BaseClient()
addr, port, err := net.SplitHostPort(probeEndpoint)
if err != nil {
return err
}
immediateCh := make(chan time.Time, 1)
immediateCh <- time.Now()
close(immediateCh)
ticker := time.NewTicker(period)
defer ticker.Stop()
tickerCh := channel.Merge(immediateCh, ticker.C)
thresholdCount := 0
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tickerCh:
start := time.Now()
log.Info("running probe lookup", "host", addr)
ips, err := resolver.LookupIPAddr(ctx, addr)
if err != nil {
log.Error(err, "cleanup probe lookup failed")
thresholdCount = 0
continue
}
log.Info("running probe request", "endpoints", len(ips))
err = probeIPs(ctx, httpClient, ips, port)
if err != nil {
log.Error(err, "cleanup probe request failed")
thresholdCount = 0
continue
}
thresholdCount += 1
log.Info("probe ran successfully", "threshold", thresholdCount, "duration", time.Since(start).String())
if thresholdCount == threshold {
log.Info("probe threshold reached")
return nil
}
}
}
}
func probeIPs(ctx context.Context, client *http.Client, ips []net.IPAddr, port string) error {
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(10)
for _, ip := range ips {
g.Go(func() error {
u := url.URL{
Scheme: "http",
Host: net.JoinHostPort(ip.String(), port),
Path: "/healthz",
}
reqCtx, cancel := context.WithTimeout(gCtx, 1*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(reqCtx, http.MethodGet, u.String(), nil)
if err != nil {
return err
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer httpx.DrainAndClose(resp.Body)
err = httpx.CheckResponseStatus(resp, http.StatusOK)
if err != nil {
return err
}
return nil
})
}
err := g.Wait()
if err != nil {
return err
}
return nil
}

View File

@ -1,61 +0,0 @@
package cleanup
import (
"context"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestCleanupFail(t *testing.T) {
t.Parallel()
srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusInternalServerError)
}))
defer srv.Close()
u, err := url.Parse(srv.URL)
require.NoError(t, err)
timeoutCtx, timeoutCancel := context.WithTimeout(t.Context(), 1*time.Second)
defer timeoutCancel()
err = Wait(timeoutCtx, u.Host, 100*time.Millisecond, 3)
require.EqualError(t, err, "context deadline exceeded")
}
func TestCleanupSucceed(t *testing.T) {
t.Parallel()
listener, err := net.Listen("tcp", ":0")
if err != nil {
panic(err)
}
addr := listener.Addr().String()
err = listener.Close()
require.NoError(t, err)
timeoutCtx, timeoutCancel := context.WithTimeout(t.Context(), 1*time.Second)
defer timeoutCancel()
g, gCtx := errgroup.WithContext(timeoutCtx)
g.Go(func() error {
err := Run(gCtx, addr, t.TempDir())
if err != nil {
return err
}
return nil
})
g.Go(func() error {
err := Wait(gCtx, addr, 100*time.Microsecond, 3)
if err != nil {
return err
}
return nil
})
err = g.Wait()
require.NoError(t, err)
}

View File

@ -0,0 +1,30 @@
package kubernetes
import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func GetClientset(kubeconfigPath string) (kubernetes.Interface, error) {
if kubeconfigPath != "" {
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, err
}
return clientset, nil
}
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset, nil
}

17
internal/mux/mux.go Normal file
View File

@ -0,0 +1,17 @@
package mux
import "net/http"
type Handler func(rw ResponseWriter, req *http.Request)
type ServeMux struct {
h Handler
}
func NewServeMux(handler Handler) *ServeMux {
return &ServeMux{h: handler}
}
func (s *ServeMux) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
s.h(&response{ResponseWriter: rw}, req)
}

View File

@ -1,4 +1,4 @@
package httpx
package mux
import (
"bufio"
@ -13,7 +13,6 @@ type ResponseWriter interface {
Error() error
Status() int
Size() int64
SetHandler(handler string)
}
var (
@ -26,7 +25,6 @@ var (
type response struct {
http.ResponseWriter
error error
handler string
status int
size int64
writtenHeader bool
@ -54,13 +52,11 @@ func (r *response) WriteError(statusCode int, err error) {
func (r *response) Flush() {
r.writtenHeader = true
//nolint: errcheck // No method to throw the error.
flusher := r.ResponseWriter.(http.Flusher)
flusher.Flush()
}
func (r *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
//nolint: errcheck // No method to throw the error.
hijacker := r.ResponseWriter.(http.Hijacker)
return hijacker.Hijack()
}
@ -89,7 +85,3 @@ func (r *response) Error() error {
func (r *response) Size() int64 {
return r.size
}
func (r *response) SetHandler(handler string) {
r.handler = handler
}

View File

@ -0,0 +1,15 @@
package mux
import (
"io"
"net/http"
"testing"
"github.com/stretchr/testify/require"
)
func TestResponseWriter(t *testing.T) {
var rw http.ResponseWriter = &response{}
_, ok := rw.(io.ReaderFrom)
require.True(t, ok)
}

View File

@ -1,124 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Spegel Debug</title>
<link rel="icon" href="https://spegel.dev/favicon.svg" type="image/svg+xml">
<script src="https://unpkg.com/htmx.org@2.0.4"></script>
<style>
body {
margin: 0;
padding: 0;
font-family: ui-sans-serif, system-ui, sans-serif, "apple color emoji", "segoe ui emoji", "segoe ui symbol", "noto color emoji";
font-size: 16px;
}
.container {
max-width: 1366px;
width: 100%;
margin: 0 auto;
padding: 0 20px;
}
.table-container {
max-width: 100%;
overflow-x: auto;
}
table {
width: 100%;
border-collapse: collapse;
}
th,
td {
text-align: left;
padding: 8px;
border: 1px solid #ddd;
}
th {
background-color: #f4f4f4;
font-weight: bold;
}
.stat-container {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
gap: 16px;
width: 100%;
margin-bottom: 16px;
}
.stat-box {
padding: 16px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
text-align: center;
}
.stat-title {
font-size: 14px;
color: #555;
}
.stat-value {
font-size: 24px;
font-weight: bold;
margin-top: 8px;
}
.measure-container {
padding: 16px;
border-radius: 8px;
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
}
input[type="text"],
button {
font-size: 16px;
height: 2em;
padding: 0 8px;
border: 1px solid #ccc;
}
input[type="text"] {
width: 100%;
max-width: 450px;
}
button {
background-color: #1d5a9a;
color: white;
border: none;
cursor: pointer;
padding: 0 12px;
}
button:hover {
background-color: #164577;
}
</style>
</head>
<body>
<div class="container">
<h1>Spegel</h1>
<div hx-get="/debug/web/stats" hx-trigger="load, every 2s"></div>
<div class="measure-container">
<h2>Measure Image Pull</h2>
<form hx-get="/debug/web/measure" hx-target="#measure-result">
<input type="text" name="image" placeholder="ghcr.io/spegel-org/spegel:v0.30.0" />
<button>Pull</button>
</form>
<div id="measure-result"></div>
</div>
</div>
</body>
</html>

View File

@ -1,48 +0,0 @@
{{ if .PeerResults }}
<h3>Resolved Peers</h3>
<div style="margin-bottom: 10px;">
<strong>Duration:</strong> {{ .PeerDuration | formatDuration }}
</div>
<div class="table-container">
<table>
<tr>
<th style="width: 50%;">Peer</th>
<th style="width: 50%;">Duration</th>
</tr>
{{ range .PeerResults }}
<tr>
<td>{{ .Peer.Addr }}</td>
<td>{{ .Duration | formatDuration }}</td>
</tr>
{{ end }}
</table>
</div>
<h3>Result</h3>
<div style="margin-bottom: 10px;">
<strong>Duration:</strong> {{ .PullDuration | formatDuration }}
<strong>Size:</strong> {{ .PullSize | formatBytes }}
</div>
<div class="table-container">
<table>
<tr>
<th>Identifier</th>
<th>Type</th>
<th>Size</th>
<th>Duration</th>
</tr>
{{ range .PullResults }}
<tr>
<td>{{ .Identifier }}</td>
<td>{{ .Type }}</td>
<td>{{ .Size | formatBytes }}</td>
<td>{{ .Duration | formatDuration }}</td>
</tr>
{{ end }}
</table>
</div>
{{ else }}
<p>No peers found for image</p>
{{ end }}

View File

@ -1,12 +0,0 @@
<div>
<div class="stat-container">
<div class="stat-box">
<div class="stat-title">Images</div>
<div class="stat-value">{{ .ImageCount }}</div>
</div>
<div class="stat-box">
<div class="stat-title">Layers</div>
<div class="stat-value">{{ .LayerCount }}</div>
</div>
</div>
</div>

View File

@ -1,220 +0,0 @@
package web
import (
"embed"
"errors"
"fmt"
"html/template"
"net"
"net/http"
"net/netip"
"net/url"
"time"
"github.com/go-logr/logr"
"github.com/prometheus/common/expfmt"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/pkg/routing"
)
//go:embed templates/*
var templatesFS embed.FS
type Web struct {
router routing.Router
ociClient *oci.Client
httpClient *http.Client
tmpls *template.Template
}
func NewWeb(router routing.Router, ociClient *oci.Client) (*Web, error) {
funcs := template.FuncMap{
"formatBytes": formatBytes,
"formatDuration": formatDuration,
}
tmpls, err := template.New("").Funcs(funcs).ParseFS(templatesFS, "templates/*")
if err != nil {
return nil, err
}
return &Web{
router: router,
ociClient: ociClient,
httpClient: httpx.BaseClient(),
tmpls: tmpls,
}, nil
}
func (w *Web) Handler(log logr.Logger) http.Handler {
m := httpx.NewServeMux(log)
m.Handle("GET /debug/web/", w.indexHandler)
m.Handle("GET /debug/web/stats", w.statsHandler)
m.Handle("GET /debug/web/measure", w.measureHandler)
return m
}
func (w *Web) indexHandler(rw httpx.ResponseWriter, req *http.Request) {
err := w.tmpls.ExecuteTemplate(rw, "index.html", nil)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
}
func (w *Web) statsHandler(rw httpx.ResponseWriter, req *http.Request) {
//nolint: errcheck // Ignore error.
srvAddr := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, fmt.Sprintf("http://%s/metrics", srvAddr.String()), nil)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
resp, err := w.httpClient.Do(req)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
defer httpx.DrainAndClose(resp.Body)
parser := expfmt.TextParser{}
metricFamilies, err := parser.TextToMetricFamilies(resp.Body)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
data := struct {
ImageCount int64
LayerCount int64
}{}
for _, metric := range metricFamilies["spegel_advertised_images"].Metric {
data.ImageCount += int64(*metric.Gauge.Value)
}
for _, metric := range metricFamilies["spegel_advertised_keys"].Metric {
data.LayerCount += int64(*metric.Gauge.Value)
}
err = w.tmpls.ExecuteTemplate(rw, "stats.html", data)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
}
type measureResult struct {
PeerResults []peerResult
PullResults []pullResult
PeerDuration time.Duration
PullDuration time.Duration
PullSize int64
}
type peerResult struct {
Peer netip.AddrPort
Duration time.Duration
}
type pullResult struct {
Identifier string
Type string
Size int64
Duration time.Duration
}
func (w *Web) measureHandler(rw httpx.ResponseWriter, req *http.Request) {
mirror := &url.URL{
Scheme: "http",
Host: "localhost:5000",
}
// Parse image name.
imgName := req.URL.Query().Get("image")
if imgName == "" {
rw.WriteError(http.StatusBadRequest, errors.New("image name cannot be empty"))
return
}
img, err := oci.ParseImage(imgName)
if err != nil {
rw.WriteError(http.StatusBadRequest, err)
return
}
res := measureResult{}
// Resolve peers for the given image.
resolveStart := time.Now()
peerCh, err := w.router.Resolve(req.Context(), imgName, 0)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
for peer := range peerCh {
d := time.Since(resolveStart)
res.PeerDuration += d
res.PeerResults = append(res.PeerResults, peerResult{
Peer: peer,
Duration: d,
})
}
if len(res.PeerResults) > 0 {
// Pull the image and measure performance.
pullMetrics, err := w.ociClient.Pull(req.Context(), img, oci.WithFetchMirror(mirror))
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
for _, metric := range pullMetrics {
res.PullDuration += metric.Duration
res.PullSize += metric.ContentLength
res.PullResults = append(res.PullResults, pullResult{
Identifier: metric.Digest.String(),
Type: metric.ContentType,
Size: metric.ContentLength,
Duration: metric.Duration,
})
}
}
err = w.tmpls.ExecuteTemplate(rw, "measure.html", res)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
}
func formatBytes(size int64) string {
const unit = 1024
if size < unit {
return fmt.Sprintf("%d B", size)
}
div, exp := int64(unit), 0
for n := size / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB", float64(size)/float64(div), "KMGTPE"[exp])
}
func formatDuration(d time.Duration) string {
if d < time.Millisecond {
return "<1ms"
}
totalMs := int64(d / time.Millisecond)
minutes := totalMs / 60000
seconds := (totalMs % 60000) / 1000
milliseconds := totalMs % 1000
out := ""
if minutes > 0 {
out += fmt.Sprintf("%dm", minutes)
}
if seconds > 0 {
out += fmt.Sprintf("%ds", seconds)
}
if milliseconds > 0 {
out += fmt.Sprintf("%dms", milliseconds)
}
return out
}

View File

@ -1,80 +0,0 @@
package web
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestWeb(t *testing.T) {
t.Parallel()
w, err := NewWeb(nil, nil)
require.NoError(t, err)
require.NotNil(t, w.tmpls)
}
func TestFormatBytes(t *testing.T) {
t.Parallel()
tests := []struct {
expected string
size int64
}{
{
size: 1,
expected: "1 B",
},
{
size: 19456,
expected: "19.0 KB",
},
{
size: 1073741824,
expected: "1.0 GB",
},
}
for _, tt := range tests {
t.Run(tt.expected, func(t *testing.T) {
t.Parallel()
result := formatBytes(tt.size)
require.Equal(t, tt.expected, result)
})
}
}
func TestDuration(t *testing.T) {
t.Parallel()
tests := []struct {
expected string
duration time.Duration
}{
{
duration: 36 * time.Millisecond,
expected: "36ms",
},
{
duration: 5 * time.Microsecond,
expected: "<1ms",
},
{
duration: 5*time.Minute + 128*time.Second,
expected: "7m8s",
},
{
duration: 2 * time.Hour,
expected: "120m",
},
}
for _, tt := range tests {
t.Run(tt.expected, func(t *testing.T) {
t.Parallel()
result := formatDuration(tt.duration)
require.Equal(t, tt.expected, result)
})
}
}

276
main.go
View File

@ -4,100 +4,87 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/alexflint/go-arg"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/afero"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"k8s.io/klog/v2"
"github.com/spegel-org/spegel/internal/cleanup"
"github.com/spegel-org/spegel/internal/web"
"github.com/spegel-org/spegel/internal/kubernetes"
"github.com/spegel-org/spegel/pkg/metrics"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/pkg/registry"
"github.com/spegel-org/spegel/pkg/routing"
"github.com/spegel-org/spegel/pkg/state"
"github.com/spegel-org/spegel/pkg/throttle"
)
type ConfigurationCmd struct {
ContainerdRegistryConfigPath string `arg:"--containerd-registry-config-path,env:CONTAINERD_REGISTRY_CONFIG_PATH" default:"/etc/containerd/certs.d" help:"Directory where mirror configuration is written."`
MirroredRegistries []url.URL `arg:"--mirrored-registries,env:MIRRORED_REGISTRIES" help:"Registries that are configured to be mirrored; if the list is empty, all registries are mirrored."`
MirrorTargets []url.URL `arg:"--mirror-targets,env:MIRROR_TARGETS,required" help:"registries that are configured to act as mirrors."`
Registries []url.URL `arg:"--registries,required,env:REGISTRIES" help:"registries that are configured to be mirrored."`
MirrorRegistries []url.URL `arg:"--mirror-registries,env:MIRROR_REGISTRIES,required" help:"registries that are configured to act as mirrors."`
ResolveTags bool `arg:"--resolve-tags,env:RESOLVE_TAGS" default:"true" help:"When true Spegel will resolve tags to digests."`
PrependExisting bool `arg:"--prepend-existing,env:PREPEND_EXISTING" default:"false" help:"When true existing mirror configuration will be kept and Spegel will prepend its configuration."`
AppendMirrors bool `arg:"--append-mirrors,env:APPEND_MIRRORS" default:"false" help:"When true existing mirror configuration will be appended to instead of replaced."`
}
type BootstrapConfig struct {
BootstrapKind string `arg:"--bootstrap-kind,env:BOOTSTRAP_KIND" help:"Kind of bootstrapper to use."`
DNSBootstrapDomain string `arg:"--dns-bootstrap-domain,env:DNS_BOOTSTRAP_DOMAIN" help:"Domain to use when bootstrapping using DNS."`
HTTPBootstrapAddr string `arg:"--http-bootstrap-addr,env:HTTP_BOOTSTRAP_ADDR" help:"Address to serve for HTTP bootstrap."`
HTTPBootstrapPeer string `arg:"--http-bootstrap-peer,env:HTTP_BOOTSTRAP_PEER" help:"Peer to HTTP bootstrap with."`
StaticBootstrapPeers []string `arg:"--static-bootstrap-peers,env:STATIC_BOOTSTRAP_PEERS" help:"Static list of peers to bootstrap with."`
BootstrapKind string `arg:"--bootstrap-kind,env:BOOTSTRAP_KIND" help:"Kind of bootstrapper to use."`
HTTPBootstrapAddr string `arg:"--http-bootstrap-addr,env:HTTP_BOOTSTRAP_ADDR" help:"Address to serve for HTTP bootstrap."`
HTTPBootstrapPeer string `arg:"--http-bootstrap-peer,env:HTTP_BOOTSTRAP_PEER" help:"Peer to HTTP bootstrap with."`
KubeconfigPath string `arg:"--kubeconfig-path,env:KUBECONFIG_PATH" help:"Path to the kubeconfig file."`
LeaderElectionName string `arg:"--leader-election-name,env:LEADER_ELECTION_NAME" default:"spegel-leader-election" help:"Name of leader election."`
LeaderElectionNamespace string `arg:"--leader-election-namespace,env:LEADER_ELECTION_NAMESPACE" default:"spegel" help:"Kubernetes namespace to write leader election data."`
}
type RegistryCmd struct {
BootstrapConfig
ContainerdRegistryConfigPath string `arg:"--containerd-registry-config-path,env:CONTAINERD_REGISTRY_CONFIG_PATH" default:"/etc/containerd/certs.d" help:"Directory where mirror configuration is written."`
MetricsAddr string `arg:"--metrics-addr,env:METRICS_ADDR" default:":9090" help:"address to serve metrics."`
ContainerdSock string `arg:"--containerd-sock,env:CONTAINERD_SOCK" default:"/run/containerd/containerd.sock" help:"Endpoint of containerd service."`
ContainerdNamespace string `arg:"--containerd-namespace,env:CONTAINERD_NAMESPACE" default:"k8s.io" help:"Containerd namespace to fetch images from."`
ContainerdContentPath string `arg:"--containerd-content-path,env:CONTAINERD_CONTENT_PATH" default:"/var/lib/containerd/io.containerd.content.v1.content" help:"Path to Containerd content store"`
DataDir string `arg:"--data-dir,env:DATA_DIR" default:"/var/lib/spegel" help:"Directory where Spegel persists data."`
RouterAddr string `arg:"--router-addr,env:ROUTER_ADDR" default:":5001" help:"address to serve router."`
RegistryAddr string `arg:"--registry-addr,env:REGISTRY_ADDR" default:":5000" help:"address to serve image registry."`
MirroredRegistries []url.URL `arg:"--mirrored-registries,env:MIRRORED_REGISTRIES" help:"Registries that are configured to be mirrored; if the list is empty, all registries are mirrored."`
MirrorResolveTimeout time.Duration `arg:"--mirror-resolve-timeout,env:MIRROR_RESOLVE_TIMEOUT" default:"20ms" help:"Max duration spent finding a mirror."`
MirrorResolveRetries int `arg:"--mirror-resolve-retries,env:MIRROR_RESOLVE_RETRIES" default:"3" help:"Max amount of mirrors to attempt."`
ResolveLatestTag bool `arg:"--resolve-latest-tag,env:RESOLVE_LATEST_TAG" default:"true" help:"When true latest tags will be resolved to digests."`
DebugWebEnabled bool `arg:"--debug-web-enabled,env:DEBUG_WEB_ENABLED" default:"false" help:"When true enables debug web page."`
}
type CleanupCmd struct {
Addr string `arg:"--addr,required,env:ADDR" help:"address to run readiness probe on."`
ContainerdRegistryConfigPath string `arg:"--containerd-registry-config-path,env:CONTAINERD_REGISTRY_CONFIG_PATH" default:"/etc/containerd/certs.d" help:"Directory where mirror configuration is written."`
}
type CleanupWaitCmd struct {
ProbeEndpoint string `arg:"--probe-endpoint,required,env:PROBE_ENDPOINT" help:"endpoint to probe cleanup jobs from."`
Threshold int `arg:"--threshold,env:THRESHOLD" default:"3" help:"amount of consecutive successful probes to consider cleanup done."`
Period time.Duration `arg:"--period,env:PERIOD" default:"2s" help:"Period between cleanup probe attempts."`
BlobSpeed *throttle.Byterate `arg:"--blob-speed,env:BLOB_SPEED" help:"Maximum write speed per request when serving blob layers. Should be an integer followed by unit Bps, KBps, MBps, GBps, or TBps."`
ContainerdRegistryConfigPath string `arg:"--containerd-registry-config-path,env:CONTAINERD_REGISTRY_CONFIG_PATH" default:"/etc/containerd/certs.d" help:"Directory where mirror configuration is written."`
MetricsAddr string `arg:"--metrics-addr,required,env:METRICS_ADDR" help:"address to serve metrics."`
LocalAddr string `arg:"--local-addr,required,env:LOCAL_ADDR" help:"Address that the local Spegel instance will be reached at."`
ContainerdSock string `arg:"--containerd-sock,env:CONTAINERD_SOCK" default:"/run/containerd/containerd.sock" help:"Endpoint of containerd service."`
ContainerdNamespace string `arg:"--containerd-namespace,env:CONTAINERD_NAMESPACE" default:"k8s.io" help:"Containerd namespace to fetch images from."`
ContainerdContentPath string `arg:"--containerd-content-path,env:CONTAINERD_CONTENT_PATH" default:"/var/lib/containerd/io.containerd.content.v1.content" help:"Path to Containerd content store"`
RouterAddr string `arg:"--router-addr,env:ROUTER_ADDR,required" help:"address to serve router."`
RegistryAddr string `arg:"--registry-addr,env:REGISTRY_ADDR,required" help:"address to serve image registry."`
Registries []url.URL `arg:"--registries,env:REGISTRIES,required" help:"registries that are configured to be mirrored."`
MirrorResolveTimeout time.Duration `arg:"--mirror-resolve-timeout,env:MIRROR_RESOLVE_TIMEOUT" default:"5s" help:"Max duration spent finding a mirror."`
MirrorResolveRetries int `arg:"--mirror-resolve-retries,env:MIRROR_RESOLVE_RETRIES" default:"3" help:"Max amount of mirrors to attempt."`
ResolveLatestTag bool `arg:"--resolve-latest-tag,env:RESOLVE_LATEST_TAG" default:"true" help:"When true latest tags will be resolved to digests."`
}
type Arguments struct {
Configuration *ConfigurationCmd `arg:"subcommand:configuration"`
Registry *RegistryCmd `arg:"subcommand:registry"`
Cleanup *CleanupCmd `arg:"subcommand:cleanup"`
CleanupWait *CleanupWaitCmd `arg:"subcommand:cleanup-wait"`
LogLevel slog.Level `arg:"--log-level,env:LOG_LEVEL" default:"INFO" help:"Minimum log level to output. Value should be DEBUG, INFO, WARN, or ERROR."`
}
func main() {
args := &Arguments{}
arg.MustParse(args)
opts := slog.HandlerOptions{
AddSource: true,
Level: args.LogLevel,
zapLog, err := zap.NewProduction()
if err != nil {
panic(fmt.Sprintf("who watches the watchmen (%v)?", err))
}
handler := slog.NewJSONHandler(os.Stderr, &opts)
log := logr.FromSlogHandler(handler)
log := zapr.NewLogger(zapLog)
klog.SetLogger(log)
ctx := logr.NewContext(context.Background(), log)
err := run(ctx, args)
err = run(ctx, args)
if err != nil {
log.Error(err, "run exit with error")
log.Error(err, "")
os.Exit(1)
}
log.Info("gracefully shutdown")
@ -111,21 +98,14 @@ func run(ctx context.Context, args *Arguments) error {
return configurationCommand(ctx, args.Configuration)
case args.Registry != nil:
return registryCommand(ctx, args.Registry)
case args.Cleanup != nil:
return cleanupCommand(ctx, args.Cleanup)
case args.CleanupWait != nil:
return cleanupWaitCommand(ctx, args.CleanupWait)
default:
return errors.New("unknown subcommand")
return fmt.Errorf("unknown subcommand")
}
}
func configurationCommand(ctx context.Context, args *ConfigurationCmd) error {
username, password, err := loadBasicAuth()
if err != nil {
return err
}
err = oci.AddMirrorConfiguration(ctx, args.ContainerdRegistryConfigPath, args.MirroredRegistries, args.MirrorTargets, args.ResolveTags, args.PrependExisting, username, password)
fs := afero.NewOsFs()
err := oci.AddMirrorConfiguration(ctx, fs, args.ContainerdRegistryConfigPath, args.Registries, args.MirrorRegistries, args.ResolveTags, args.AppendMirrors)
if err != nil {
return err
}
@ -136,82 +116,17 @@ func registryCommand(ctx context.Context, args *RegistryCmd) (err error) {
log := logr.FromContextOrDiscard(ctx)
g, ctx := errgroup.WithContext(ctx)
username, password, err := loadBasicAuth()
// OCI Client
ociClient, err := oci.NewContainerd(args.ContainerdSock, args.ContainerdNamespace, args.ContainerdRegistryConfigPath, args.Registries, oci.WithContentPath(args.ContainerdContentPath))
if err != nil {
return err
}
err = ociClient.Verify(ctx)
if err != nil {
return err
}
ociClient := oci.NewClient()
// OCI Store
ociStore, err := oci.NewContainerd(args.ContainerdSock, args.ContainerdNamespace, args.ContainerdRegistryConfigPath, args.MirroredRegistries, oci.WithContentPath(args.ContainerdContentPath))
if err != nil {
return err
}
err = ociStore.Verify(ctx)
if err != nil {
return err
}
// Router
_, registryPort, err := net.SplitHostPort(args.RegistryAddr)
if err != nil {
return err
}
bootstrapper, err := getBootstrapper(args.BootstrapConfig)
if err != nil {
return err
}
routerOpts := []routing.P2PRouterOption{
routing.WithDataDir(args.DataDir),
}
router, err := routing.NewP2PRouter(ctx, args.RouterAddr, bootstrapper, registryPort, routerOpts...)
if err != nil {
return err
}
g.Go(func() error {
return router.Run(ctx)
})
// State tracking
g.Go(func() error {
err := state.Track(ctx, ociStore, router, args.ResolveLatestTag)
if err != nil {
return err
}
return nil
})
// Registry
registryOpts := []registry.RegistryOption{
registry.WithResolveLatestTag(args.ResolveLatestTag),
registry.WithResolveRetries(args.MirrorResolveRetries),
registry.WithResolveTimeout(args.MirrorResolveTimeout),
registry.WithLogger(log),
registry.WithBasicAuth(username, password),
}
reg, err := registry.NewRegistry(ociStore, router, registryOpts...)
if err != nil {
return err
}
regSrv, err := reg.Server(args.RegistryAddr)
if err != nil {
return err
}
g.Go(func() error {
if err := regSrv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
return err
}
return nil
})
g.Go(func() error {
<-ctx.Done()
shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
return regSrv.Shutdown(shutdownCtx)
})
// Metrics, pprof, and debug web
// Metrics
metrics.Register()
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.HandlerFor(metrics.DefaultGatherer, promhttp.HandlerOpts{}))
@ -220,18 +135,10 @@ func registryCommand(ctx context.Context, args *RegistryCmd) (err error) {
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/debug/pprof/heap", pprof.Handler("heap"))
mux.Handle("/debug/pprof/allocs", pprof.Handler("allocs"))
mux.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine"))
mux.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate"))
mux.Handle("/debug/pprof/block", pprof.Handler("block"))
mux.Handle("/debug/pprof/mutex", pprof.Handler("mutex"))
if args.DebugWebEnabled {
web, err := web.NewWeb(router, ociClient)
if err != nil {
return err
}
mux.Handle("/debug/web/", web.Handler(log))
}
metricsSrv := &http.Server{
Addr: args.MetricsAddr,
Handler: mux,
@ -249,6 +156,62 @@ func registryCommand(ctx context.Context, args *RegistryCmd) (err error) {
return metricsSrv.Shutdown(shutdownCtx)
})
// Router
_, registryPort, err := net.SplitHostPort(args.RegistryAddr)
if err != nil {
return err
}
bootstrapper, err := getBootstrapper(args.BootstrapConfig)
if err != nil {
return err
}
router, err := routing.NewP2PRouter(ctx, args.RouterAddr, bootstrapper, registryPort)
if err != nil {
return err
}
g.Go(func() error {
return router.Run(ctx)
})
g.Go(func() error {
<-ctx.Done()
return router.Close()
})
// State tracking
g.Go(func() error {
err := state.Track(ctx, ociClient, router, args.ResolveLatestTag)
if err != nil {
return err
}
return nil
})
// Registry
registryOpts := []registry.Option{
registry.WithResolveLatestTag(args.ResolveLatestTag),
registry.WithResolveRetries(args.MirrorResolveRetries),
registry.WithResolveTimeout(args.MirrorResolveTimeout),
registry.WithLocalAddress(args.LocalAddr),
registry.WithLogger(log),
}
if args.BlobSpeed != nil {
registryOpts = append(registryOpts, registry.WithBlobSpeed(*args.BlobSpeed))
}
reg := registry.NewRegistry(ociClient, router, registryOpts...)
regSrv := reg.Server(args.RegistryAddr)
g.Go(func() error {
if err := regSrv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
return err
}
return nil
})
g.Go(func() error {
<-ctx.Done()
shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
return regSrv.Shutdown(shutdownCtx)
})
log.Info("running Spegel", "registry", args.RegistryAddr, "router", args.RouterAddr)
err = g.Wait()
if err != nil {
@ -257,44 +220,17 @@ func registryCommand(ctx context.Context, args *RegistryCmd) (err error) {
return nil
}
func cleanupCommand(ctx context.Context, args *CleanupCmd) error {
err := cleanup.Run(ctx, args.Addr, args.ContainerdRegistryConfigPath)
if err != nil {
return err
}
return nil
}
func cleanupWaitCommand(ctx context.Context, args *CleanupWaitCmd) error {
err := cleanup.Wait(ctx, args.ProbeEndpoint, args.Period, args.Threshold)
if err != nil {
return err
}
return nil
}
func getBootstrapper(cfg BootstrapConfig) (routing.Bootstrapper, error) { //nolint: ireturn // Return type can be different structs.
func getBootstrapper(cfg BootstrapConfig) (routing.Bootstrapper, error) {
switch cfg.BootstrapKind {
case "dns":
return routing.NewDNSBootstrapper(cfg.DNSBootstrapDomain, 10), nil
case "http":
return routing.NewHTTPBootstrapper(cfg.HTTPBootstrapAddr, cfg.HTTPBootstrapPeer), nil
case "static":
return routing.NewStaticBootstrapperFromStrings(cfg.StaticBootstrapPeers)
case "kubernetes":
cs, err := kubernetes.GetClientset(cfg.KubeconfigPath)
if err != nil {
return nil, err
}
return routing.NewKubernetesBootstrapper(cs, cfg.LeaderElectionNamespace, cfg.LeaderElectionName), nil
default:
return nil, fmt.Errorf("unknown bootstrap kind %s", cfg.BootstrapKind)
}
}
func loadBasicAuth() (string, string, error) {
dirPath := "/etc/secrets/basic-auth"
username, err := os.ReadFile(filepath.Join(dirPath, "username"))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return "", "", err
}
password, err := os.ReadFile(filepath.Join(dirPath, "password"))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return "", "", err
}
return string(username), string(password), nil
}

View File

@ -1,30 +0,0 @@
package httpx
import "net/http"
const (
HeaderContentType = "Content-Type"
HeaderContentLength = "Content-Length"
HeaderContentRange = "Content-Range"
HeaderRange = "Range"
HeaderAcceptRanges = "Accept-Ranges"
HeaderUserAgent = "User-Agent"
HeaderAccept = "Accept"
HeaderAuthorization = "Authorization"
HeaderWWWAuthenticate = "WWW-Authenticate"
HeaderXForwardedFor = "X-Forwarded-For"
)
const (
ContentTypeBinary = "application/octet-stream"
ContentTypeJSON = "application/json"
)
// CopyHeader copies header from source to destination.
func CopyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}

View File

@ -1,20 +0,0 @@
package httpx
import (
"net/http"
"testing"
"github.com/stretchr/testify/require"
)
func TestCopyHeader(t *testing.T) {
t.Parallel()
src := http.Header{
"foo": []string{"2", "1"},
}
dst := http.Header{}
CopyHeader(dst, src)
require.Equal(t, []string{"2", "1"}, dst.Values("foo"))
}

View File

@ -1,54 +0,0 @@
package httpx
import (
"errors"
"io"
"net"
"net/http"
"time"
)
// BaseClient returns a http client with reasonable defaults set.
func BaseClient() *http.Client {
return &http.Client{
Transport: BaseTransport(),
Timeout: 10 * time.Second,
}
}
// BaseTransport returns a http transport with reasonable defaults set.
func BaseTransport() *http.Transport {
return &http.Transport{
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
const (
// MaxReadBytes is the maximum number of bytes read when draining a response body or reading an error message.
MaxReadBytes = 512 * 1024
)
// DrainAndClose drains up to MaxReadBytes from the reader before closing it, returning an error if more data remains.
func DrainAndClose(rc io.ReadCloser) error {
errs := []error{}
n, err := io.Copy(io.Discard, io.LimitReader(rc, MaxReadBytes+1))
if err != nil {
errs = append(errs, err)
}
if n > MaxReadBytes {
errs = append(errs, errors.New("reader has more data than max read bytes"))
}
err = rc.Close()
if err != nil {
errs = append(errs, err)
}
return errors.Join(errs...)
}

View File

@ -1,45 +0,0 @@
package httpx
import (
"bytes"
"io"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestBaseClient(t *testing.T) {
t.Parallel()
c := BaseClient()
require.Equal(t, 10*time.Second, c.Timeout)
_, ok := c.Transport.(*http.Transport)
require.True(t, ok)
}
func TestBaseTransport(t *testing.T) {
t.Parallel()
BaseTransport()
}
func TestDrainAndClose(t *testing.T) {
t.Parallel()
buf := bytes.NewBuffer(nil)
err := DrainAndClose(io.NopCloser(buf))
require.NoError(t, err)
require.Empty(t, buf.Bytes())
buf = bytes.NewBuffer(make([]byte, MaxReadBytes))
err = DrainAndClose(io.NopCloser(buf))
require.NoError(t, err)
require.Empty(t, buf.Bytes())
buf = bytes.NewBuffer(make([]byte, MaxReadBytes+10))
err = DrainAndClose(io.NopCloser(buf))
require.EqualError(t, err, "reader has more data than max read bytes")
require.Len(t, buf.Bytes(), 9)
}

View File

@ -1,32 +0,0 @@
package httpx
import "github.com/prometheus/client_golang/prometheus"
var (
HttpRequestDurHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: "http",
Name: "request_duration_seconds",
Help: "The latency of the HTTP requests.",
}, []string{"handler", "method", "code"})
HttpResponseSizeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: "http",
Name: "response_size_bytes",
Help: "The size of the HTTP responses.",
// 1kB up to 2GB
Buckets: prometheus.ExponentialBuckets(1024, 5, 10),
}, []string{"handler", "method", "code"})
HttpRequestsInflight = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: "http",
Name: "requests_inflight",
Help: "The number of inflight requests being handled at the same time.",
}, []string{"handler"})
)
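// RegisterMetrics registers the HTTP metrics with the given registerer, falling back to the default Prometheus registerer when nil.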
func RegisterMetrics(registerer prometheus.Registerer) {
if registerer == nil {
registerer = prometheus.DefaultRegisterer
}
registerer.MustRegister(HttpRequestDurHistogram)
registerer.MustRegister(HttpResponseSizeHistogram)
registerer.MustRegister(HttpRequestsInflight)
}

View File

@ -1,104 +0,0 @@
package httpx
import (
"errors"
"net"
"net/http"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
)
type HandlerFunc func(rw ResponseWriter, req *http.Request)
type ServeMux struct {
mux *http.ServeMux
log logr.Logger
}
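// NewServeMux returns a ServeMux that wraps http.ServeMux and adds access logging and metrics to registered handlers.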
func NewServeMux(log logr.Logger) *ServeMux {
return &ServeMux{
mux: http.NewServeMux(),
log: log,
}
}
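// ServeHTTP dispatches the request to the matching handler, logging and returning 404 when no pattern matches.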
func (s *ServeMux) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
h, pattern := s.mux.Handler(req)
if pattern == "" {
kvs := []any{
"path", req.URL.Path,
"status", http.StatusNotFound,
"method", req.Method,
"ip", GetClientIP(req),
}
s.log.Error(errors.New("page not found"), "", kvs...)
rw.WriteHeader(http.StatusNotFound)
return
}
h.ServeHTTP(rw, req)
}
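// Handle registers a handler for the pattern and instruments it with duration, response size, and inflight metrics as well as access logging.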
func (s *ServeMux) Handle(pattern string, handler HandlerFunc) {
metricsPath := metricsFriendlyPath(pattern)
s.mux.HandleFunc(pattern, func(w http.ResponseWriter, req *http.Request) {
start := time.Now()
rw := &response{ResponseWriter: w}
defer func() {
latency := time.Since(start)
statusCode := strconv.FormatInt(int64(rw.Status()), 10)
HttpRequestsInflight.WithLabelValues(metricsPath).Add(-1)
HttpRequestDurHistogram.WithLabelValues(metricsPath, req.Method, statusCode).Observe(latency.Seconds())
HttpResponseSizeHistogram.WithLabelValues(metricsPath, req.Method, statusCode).Observe(float64(rw.Size()))
// Ignore logging requests to healthz to reduce log noise
if req.URL.Path == "/healthz" {
return
}
kvs := []any{
"path", req.URL.Path,
"status", rw.Status(),
"method", req.Method,
"latency", latency.String(),
"ip", GetClientIP(req),
"handler", rw.handler,
}
if rw.Status() >= 200 && rw.Status() < 400 {
s.log.Info("", kvs...)
return
}
s.log.Error(rw.Error(), "", kvs...)
}()
HttpRequestsInflight.WithLabelValues(metricsPath).Add(1)
handler(rw, req)
})
}
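// GetClientIP returns the client IP, preferring the first X-Forwarded-For entry and falling back to the request remote address.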
func GetClientIP(req *http.Request) string {
forwardedFor := req.Header.Get(HeaderXForwardedFor)
if forwardedFor != "" {
comps := strings.Split(forwardedFor, ",")
if len(comps) > 1 {
return comps[0]
}
return forwardedFor
}
h, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return ""
}
return h
}
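// metricsFriendlyPath normalizes a mux pattern into a low-cardinality metrics label by dropping the optional method prefix
// and marking prefix patterns with a trailing "*".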
func metricsFriendlyPath(pattern string) string {
_, path, _ := strings.Cut(pattern, "/")
path = "/" + path
if strings.HasSuffix(path, "/") {
return path + "*"
}
return path
}

View File

@ -1,160 +0,0 @@
package httpx
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
)
func TestServeMux(t *testing.T) {
t.Parallel()
registerer := prometheus.NewRegistry()
RegisterMetrics(registerer)
m := NewServeMux(logr.Discard())
handlersCalled := []string{}
m.Handle("/exact", func(rw ResponseWriter, req *http.Request) {
handlersCalled = append(handlersCalled, "exact")
})
m.Handle("/prefix/", func(rw ResponseWriter, req *http.Request) {
handlersCalled = append(handlersCalled, "prefix")
})
paths := []string{"/prefix/", "/exact", "/exact/foo", "/prefix/bar"}
for _, path := range paths {
rw := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "http://localhost"+path, nil)
m.ServeHTTP(rw, req)
}
expectedHandlersCalled := []string{"prefix", "exact", "prefix"}
require.Equal(t, expectedHandlersCalled, handlersCalled)
expectedMetrics := `
# HELP http_requests_inflight The number of inflight requests being handled at the same time.
# TYPE http_requests_inflight gauge
http_requests_inflight{handler="/exact"} 0
http_requests_inflight{handler="/prefix/*"} 0
`
err := testutil.CollectAndCompare(HttpRequestsInflight, strings.NewReader(expectedMetrics))
require.NoError(t, err)
expectedMetrics = `
# HELP http_response_size_bytes The size of the HTTP responses.
# TYPE http_response_size_bytes histogram
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="1024"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="5120"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="25600"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="128000"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="640000"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="3.2e+06"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="1.6e+07"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="8e+07"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="4e+08"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="2e+09"} 1
http_response_size_bytes_bucket{code="200",handler="/exact",method="GET",le="+Inf"} 1
http_response_size_bytes_sum{code="200",handler="/exact",method="GET"} 0
http_response_size_bytes_count{code="200",handler="/exact",method="GET"} 1
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="1024"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="5120"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="25600"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="128000"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="640000"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="3.2e+06"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="1.6e+07"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="8e+07"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="4e+08"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="2e+09"} 2
http_response_size_bytes_bucket{code="200",handler="/prefix/*",method="GET",le="+Inf"} 2
http_response_size_bytes_sum{code="200",handler="/prefix/*",method="GET"} 0
http_response_size_bytes_count{code="200",handler="/prefix/*",method="GET"} 2
`
err = testutil.CollectAndCompare(HttpResponseSizeHistogram, strings.NewReader(expectedMetrics))
require.NoError(t, err)
}
func TestGetClientIP(t *testing.T) {
t.Parallel()
tests := []struct {
name string
request *http.Request
expected string
}{
{
name: "x forwarded for single",
request: &http.Request{
Header: http.Header{
HeaderXForwardedFor: []string{"localhost"},
},
},
expected: "localhost",
},
{
name: "x forwarded for multiple",
request: &http.Request{
Header: http.Header{
HeaderXForwardedFor: []string{"localhost,127.0.0.1"},
},
},
expected: "localhost",
},
{
name: "remote address",
request: &http.Request{
RemoteAddr: "127.0.0.1:9090",
},
expected: "127.0.0.1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
ip := GetClientIP(tt.request)
require.Equal(t, tt.expected, ip)
})
}
}
func TestMetricsFriendlyPath(t *testing.T) {
t.Parallel()
tests := []struct {
pattern string
expected string
}{
{
pattern: "/",
expected: "/*",
},
{
pattern: "/exact",
expected: "/exact",
},
{
pattern: "/prefix/",
expected: "/prefix/*",
},
{
pattern: "/chats/{id}/message/{index}",
expected: "/chats/{id}/message/{index}",
},
}
for _, method := range []string{"", "GET ", "HEAD "} {
for _, tt := range tests {
t.Run(tt.pattern, func(t *testing.T) {
t.Parallel()
metricsPath := metricsFriendlyPath(method + tt.pattern)
require.Equal(t, tt.expected, metricsPath)
})
}
}
}

View File

@ -1,26 +0,0 @@
package httpx
import (
"fmt"
"strings"
)
type ByteRange struct {
Start int64 `json:"start"`
End int64 `json:"end"`
}
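// FormatRangeHeader formats a single byte range as an HTTP Range header value.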
func FormatRangeHeader(byteRange ByteRange) string {
return fmt.Sprintf("bytes=%d-%d", byteRange.Start, byteRange.End)
}
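// FormatMultipartRangeHeader formats multiple byte ranges as a single HTTP Range header value, returning an empty string when no ranges are given.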
func FormatMultipartRangeHeader(byteRanges []ByteRange) string {
if len(byteRanges) == 0 {
return ""
}
ranges := []string{}
for _, br := range byteRanges {
ranges = append(ranges, fmt.Sprintf("%d-%d", br.Start, br.End))
}
return "bytes=" + strings.Join(ranges, ", ")
}

View File

@ -1,35 +0,0 @@
package httpx
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestFormatRangeHeader(t *testing.T) {
t.Parallel()
br := ByteRange{Start: 10, End: 2000}
val := FormatRangeHeader(br)
require.Equal(t, "bytes=10-2000", val)
}
func TestFormatMultipartRangeHeader(t *testing.T) {
t.Parallel()
brr := []ByteRange{
{
Start: 10,
End: 100,
},
{
Start: 0,
End: 1,
},
}
val := FormatMultipartRangeHeader(brr)
require.Equal(t, "bytes=10-100, 0-1", val)
val = FormatMultipartRangeHeader(nil)
require.Empty(t, val)
}

View File

@ -1,79 +0,0 @@
package httpx
import (
"errors"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestResponseWriter(t *testing.T) {
t.Parallel()
var httpRw http.ResponseWriter = &response{}
_, ok := httpRw.(io.ReaderFrom)
require.True(t, ok)
httpRw = httptest.NewRecorder()
rw := &response{
ResponseWriter: httpRw,
}
require.Equal(t, httpRw, rw.Unwrap())
require.NoError(t, rw.Error())
require.Equal(t, int64(0), rw.Size())
require.Equal(t, http.StatusOK, rw.Status())
rw = &response{
ResponseWriter: httptest.NewRecorder(),
}
rw.WriteHeader(http.StatusNotFound)
require.True(t, rw.writtenHeader)
require.Equal(t, http.StatusNotFound, rw.Status())
rw.WriteHeader(http.StatusBadGateway)
require.Equal(t, http.StatusNotFound, rw.Status())
_, err := rw.Write([]byte("foo"))
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, rw.Status())
rw = &response{
ResponseWriter: httptest.NewRecorder(),
}
err = errors.New("some server error")
rw.WriteError(http.StatusInternalServerError, err)
require.Equal(t, err, rw.Error())
require.Equal(t, http.StatusInternalServerError, rw.Status())
rw = &response{
ResponseWriter: httptest.NewRecorder(),
}
first := "hello world"
n, err := rw.Write([]byte(first))
require.Equal(t, http.StatusOK, rw.Status())
require.NoError(t, err)
require.Equal(t, len(first), n)
require.Equal(t, int64(len(first)), rw.Size())
second := "foo bar"
n, err = rw.Write([]byte(second))
require.NoError(t, err)
require.Equal(t, len(second), n)
require.Equal(t, int64(len(first)+len(second)), rw.Size())
rw = &response{
ResponseWriter: httptest.NewRecorder(),
}
r := strings.NewReader("reader")
readFromN, err := rw.ReadFrom(r)
require.NoError(t, err)
require.Equal(t, r.Size(), readFromN)
require.Equal(t, r.Size(), rw.Size())
rw = &response{
ResponseWriter: httptest.NewRecorder(),
}
rw.SetHandler("foo")
require.Equal(t, "foo", rw.handler)
}

View File

@ -1,64 +0,0 @@
package httpx
import (
"errors"
"fmt"
"io"
"net/http"
"slices"
"strings"
)
type StatusError struct {
Message string
ExpectedCodes []int
StatusCode int
}
func (e *StatusError) Error() string {
expectedCodeStrs := []string{}
for _, expected := range e.ExpectedCodes {
expectedCodeStrs = append(expectedCodeStrs, fmt.Sprintf("%d %s", expected, http.StatusText(expected)))
}
msg := fmt.Sprintf("expected one of the following statuses [%s], but received %d %s", strings.Join(expectedCodeStrs, ", "), e.StatusCode, http.StatusText(e.StatusCode))
if e.Message != "" {
msg += ": " + e.Message
}
return msg
}
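// CheckResponseStatus returns an error if the response status code is not one of the expected codes, attaching any readable error message from the body.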
func CheckResponseStatus(resp *http.Response, expectedCodes ...int) error {
if len(expectedCodes) == 0 {
return errors.New("expected codes cannot be empty")
}
if slices.Contains(expectedCodes, resp.StatusCode) {
return nil
}
message, messageErr := getErrorMessage(resp)
statusErr := &StatusError{
Message: message,
ExpectedCodes: expectedCodes,
StatusCode: resp.StatusCode,
}
return errors.Join(statusErr, messageErr)
}
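// getErrorMessage reads a bounded error message from the response body for text-like content types, skipping HEAD requests.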
func getErrorMessage(resp *http.Response) (string, error) {
if resp.Request.Method == http.MethodHead {
return "", nil
}
contentTypes := []string{
"text/plain",
"text/html",
"application/json",
"application/xml",
}
if !slices.Contains(contentTypes, resp.Header.Get(HeaderContentType)) {
return "", nil
}
b, err := io.ReadAll(io.LimitReader(resp.Body, MaxReadBytes))
if err != nil {
return "", err
}
return string(b), err
}

View File

@ -1,97 +0,0 @@
package httpx
import (
"bytes"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
)
func TestStatusError(t *testing.T) {
t.Parallel()
tests := []struct {
name string
contentType string
body string
expectedError string
requestMethod string
expectedCodes []int
statusCode int
}{
{
name: "status code matches one of expected",
contentType: "text/plain",
body: "Hello World",
statusCode: http.StatusOK,
expectedCodes: []int{http.StatusNotFound, http.StatusOK},
requestMethod: http.MethodGet,
expectedError: "",
},
{
name: "no expected status codes",
contentType: "text/plain",
statusCode: http.StatusOK,
expectedCodes: []int{},
expectedError: "expected codes cannot be empty",
},
{
name: "wrong code with text content and GET request",
contentType: "text/plain",
body: "Hello World",
statusCode: http.StatusNotFound,
expectedCodes: []int{http.StatusOK},
requestMethod: http.MethodGet,
expectedError: "expected one of the following statuses [200 OK], but received 404 Not Found: Hello World",
},
{
name: "wrong code with text content and HEAD request",
contentType: "text/plain",
body: "Hello World",
statusCode: http.StatusNotFound,
expectedCodes: []int{http.StatusOK, http.StatusPartialContent},
requestMethod: http.MethodHead,
expectedError: "expected one of the following statuses [200 OK, 206 Partial Content], but received 404 Not Found",
},
{
name: "wrong code with text content and GET request but octet stream",
contentType: "application/octet-stream",
body: "Hello World",
statusCode: http.StatusNotFound,
expectedCodes: []int{http.StatusOK},
requestMethod: http.MethodGet,
expectedError: "expected one of the following statuses [200 OK], but received 404 Not Found",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
rec := httptest.NewRecorder()
rec.WriteHeader(tt.statusCode)
rec.Header().Set(HeaderContentType, tt.contentType)
rec.Body = bytes.NewBufferString(tt.body)
resp := &http.Response{
StatusCode: tt.statusCode,
Status: http.StatusText(tt.statusCode),
Header: rec.Header(),
Body: io.NopCloser(rec.Body),
Request: &http.Request{
Method: tt.requestMethod,
},
}
err := CheckResponseStatus(resp, tt.expectedCodes...)
if tt.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, tt.expectedError)
}
})
}
}

View File

@ -2,8 +2,6 @@ package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/spegel-org/spegel/pkg/httpx"
)
var (
@ -20,7 +18,7 @@ var (
MirrorRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "spegel_mirror_requests_total",
Help: "Total number of mirror requests.",
}, []string{"registry", "cache"})
}, []string{"registry", "cache", "source"})
ResolveDurHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "spegel_resolve_duration_seconds",
Help: "The duration for router to resolve a peer.",
@ -41,6 +39,21 @@ var (
Name: "spegel_advertised_keys",
Help: "Number of keys advertised to be available.",
}, []string{"registry"})
HttpRequestDurHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: "http",
Name: "request_duration_seconds",
Help: "The latency of the HTTP requests.",
}, []string{"handler", "method", "code"})
HttpResponseSizeHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: "http",
Name: "response_size_bytes",
Help: "The size of the HTTP responses.",
}, []string{"handler", "method", "code"})
HttpRequestsInflight = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: "http",
Name: "requests_inflight",
Help: "The number of inflight requests being handled at the same time.",
}, []string{"handler"})
)
func Register() {
@ -50,5 +63,7 @@ func Register() {
DefaultRegisterer.MustRegister(AdvertisedImageTags)
DefaultRegisterer.MustRegister(AdvertisedImageDigests)
DefaultRegisterer.MustRegister(AdvertisedKeys)
httpx.RegisterMetrics(DefaultRegisterer)
DefaultRegisterer.MustRegister(HttpRequestDurHistogram)
DefaultRegisterer.MustRegister(HttpResponseSizeHistogram)
DefaultRegisterer.MustRegister(HttpRequestsInflight)
}

View File

@ -1,11 +0,0 @@
package metrics
import (
"testing"
)
func TestRegister(t *testing.T) {
t.Parallel()
Register()
}

View File

@ -1,358 +0,0 @@
package oci
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
"net/url"
"path"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/containerd/v2/core/images"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spegel-org/spegel/pkg/httpx"
)
const (
HeaderDockerDigest = "Docker-Content-Digest"
)
type FetchConfig struct {
Mirror *url.URL
Header http.Header
}
func (cfg *FetchConfig) Apply(opts ...FetchOption) error {
for _, opt := range opts {
if opt == nil {
continue
}
if err := opt(cfg); err != nil {
return err
}
}
return nil
}
type FetchOption func(cfg *FetchConfig) error
func WithFetchMirror(mirror *url.URL) FetchOption {
return func(cfg *FetchConfig) error {
cfg.Mirror = mirror
return nil
}
}
func WithFetchHeader(header http.Header) FetchOption {
return func(cfg *FetchConfig) error {
cfg.Header = header
return nil
}
}
type Client struct {
hc *http.Client
tc sync.Map
}
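// NewClient returns a registry client based on the shared base HTTP client with the overall request timeout disabled.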
func NewClient() *Client {
hc := httpx.BaseClient()
hc.Timeout = 0
return &Client{
hc: hc,
tc: sync.Map{},
}
}
type PullMetric struct {
Digest digest.Digest
ContentType string
ContentLength int64
Duration time.Duration
}
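// Pull fetches the manifest tree of the image and all referenced blobs for the current platform,
// discarding blob content and returning a metric for each fetched descriptor.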
func (c *Client) Pull(ctx context.Context, img Image, opts ...FetchOption) ([]PullMetric, error) {
pullMetrics := []PullMetric{}
queue := []DistributionPath{
{
Kind: DistributionKindManifest,
Name: img.Repository,
Digest: img.Digest,
Tag: img.Tag,
Registry: img.Registry,
},
}
for len(queue) > 0 {
dist := queue[0]
queue = queue[1:]
start := time.Now()
desc, err := func() (ocispec.Descriptor, error) {
rc, desc, err := c.Get(ctx, dist, nil, opts...)
if err != nil {
return ocispec.Descriptor{}, err
}
defer httpx.DrainAndClose(rc)
switch dist.Kind {
case DistributionKindBlob:
// Right now we are just discarding the contents because we do not have a writable store.
_, copyErr := io.Copy(io.Discard, rc)
closeErr := rc.Close()
err := errors.Join(copyErr, closeErr)
if err != nil {
return ocispec.Descriptor{}, err
}
case DistributionKindManifest:
b, readErr := io.ReadAll(rc)
closeErr := rc.Close()
err = errors.Join(readErr, closeErr)
if err != nil {
return ocispec.Descriptor{}, err
}
switch desc.MediaType {
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
var idx ocispec.Index
if err := json.Unmarshal(b, &idx); err != nil {
return ocispec.Descriptor{}, err
}
for _, m := range idx.Manifests {
// TODO: Add platform option.
//nolint: staticcheck // Simplify in the future.
if !(m.Platform.OS == runtime.GOOS && m.Platform.Architecture == runtime.GOARCH) {
continue
}
queue = append(queue, DistributionPath{
Kind: DistributionKindManifest,
Name: dist.Name,
Digest: m.Digest,
Registry: dist.Registry,
})
}
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
var manifest ocispec.Manifest
err := json.Unmarshal(b, &manifest)
if err != nil {
return ocispec.Descriptor{}, err
}
queue = append(queue, DistributionPath{
Kind: DistributionKindBlob,
Name: dist.Name,
Digest: manifest.Config.Digest,
Registry: dist.Registry,
})
for _, layer := range manifest.Layers {
queue = append(queue, DistributionPath{
Kind: DistributionKindBlob,
Name: dist.Name,
Digest: layer.Digest,
Registry: dist.Registry,
})
}
}
}
return desc, nil
}()
if err != nil {
return nil, err
}
metric := PullMetric{
Digest: desc.Digest,
Duration: time.Since(start),
ContentType: desc.MediaType,
ContentLength: desc.Size,
}
pullMetrics = append(pullMetrics, metric)
}
return pullMetrics, nil
}
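// Head fetches only the descriptor for the given distribution path.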
func (c *Client) Head(ctx context.Context, dist DistributionPath, opts ...FetchOption) (ocispec.Descriptor, error) {
rc, desc, err := c.fetch(ctx, http.MethodHead, dist, nil, opts...)
if err != nil {
return ocispec.Descriptor{}, err
}
defer httpx.DrainAndClose(rc)
return desc, nil
}
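// Get fetches the content for the given distribution path, optionally limited to the given byte ranges.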
func (c *Client) Get(ctx context.Context, dist DistributionPath, brr []httpx.ByteRange, opts ...FetchOption) (io.ReadCloser, ocispec.Descriptor, error) {
rc, desc, err := c.fetch(ctx, http.MethodGet, dist, brr, opts...)
if err != nil {
return nil, ocispec.Descriptor{}, err
}
return rc, desc, nil
}
func (c *Client) fetch(ctx context.Context, method string, dist DistributionPath, brr []httpx.ByteRange, opts ...FetchOption) (io.ReadCloser, ocispec.Descriptor, error) {
cfg := FetchConfig{}
err := cfg.Apply(opts...)
if err != nil {
return nil, ocispec.Descriptor{}, err
}
tcKey := dist.Registry + dist.Name
u := dist.URL()
if cfg.Mirror != nil {
u.Scheme = cfg.Mirror.Scheme
u.Host = cfg.Mirror.Host
u.Path = path.Join(cfg.Mirror.Path, u.Path)
}
if u.Host == "docker.io" {
u.Host = "registry-1.docker.io"
}
for range 2 {
req, err := http.NewRequestWithContext(ctx, method, u.String(), nil)
if err != nil {
return nil, ocispec.Descriptor{}, err
}
httpx.CopyHeader(req.Header, cfg.Header)
req.Header.Set(httpx.HeaderUserAgent, "spegel")
req.Header.Add(httpx.HeaderAccept, "application/vnd.oci.image.manifest.v1+json")
req.Header.Add(httpx.HeaderAccept, "application/vnd.docker.distribution.manifest.v2+json")
req.Header.Add(httpx.HeaderAccept, "application/vnd.oci.image.index.v1+json")
req.Header.Add(httpx.HeaderAccept, "application/vnd.docker.distribution.manifest.list.v2+json")
if len(brr) > 0 {
req.Header.Add(httpx.HeaderRange, httpx.FormatMultipartRangeHeader(brr))
}
token, ok := c.tc.Load(tcKey)
if ok {
//nolint: errcheck // We know it will be a string.
req.Header.Set(httpx.HeaderAuthorization, "Bearer "+token.(string))
}
resp, err := c.hc.Do(req)
if err != nil {
return nil, ocispec.Descriptor{}, err
}
if resp.StatusCode == http.StatusUnauthorized {
c.tc.Delete(tcKey)
wwwAuth := resp.Header.Get(httpx.HeaderWWWAuthenticate)
token, err = getBearerToken(ctx, wwwAuth, c.hc)
if err != nil {
return nil, ocispec.Descriptor{}, err
}
c.tc.Store(tcKey, token)
continue
}
err = httpx.CheckResponseStatus(resp, http.StatusOK, http.StatusPartialContent)
if err != nil {
httpx.DrainAndClose(resp.Body)
return nil, ocispec.Descriptor{}, err
}
// Handle optional headers for blobs.
header := resp.Header.Clone()
if dist.Kind == DistributionKindBlob {
if header.Get(httpx.HeaderContentType) == "" {
header.Set(httpx.HeaderContentType, httpx.ContentTypeBinary)
}
if header.Get(HeaderDockerDigest) == "" {
header.Set(HeaderDockerDigest, dist.Digest.String())
}
}
desc, err := DescriptorFromHeader(header)
if err != nil {
httpx.DrainAndClose(resp.Body)
return nil, ocispec.Descriptor{}, err
}
return resp.Body, desc, nil
}
return nil, ocispec.Descriptor{}, errors.New("could not perform request")
}
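// getBearerToken parses the WWW-Authenticate challenge and requests a bearer token from the referenced realm.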
func getBearerToken(ctx context.Context, wwwAuth string, client *http.Client) (string, error) {
if !strings.HasPrefix(wwwAuth, "Bearer ") {
return "", errors.New("unsupported auth scheme")
}
params := map[string]string{}
for _, part := range strings.Split(wwwAuth[len("Bearer "):], ",") {
kv := strings.SplitN(strings.TrimSpace(part), "=", 2)
if len(kv) == 2 {
params[kv[0]] = strings.Trim(kv[1], `"`)
}
}
authURL, err := url.Parse(params["realm"])
if err != nil {
return "", err
}
q := authURL.Query()
if service, ok := params["service"]; ok {
q.Set("service", service)
}
if scope, ok := params["scope"]; ok {
q.Set("scope", scope)
}
authURL.RawQuery = q.Encode()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, authURL.String(), nil)
if err != nil {
return "", err
}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer httpx.DrainAndClose(resp.Body)
err = httpx.CheckResponseStatus(resp, http.StatusOK)
if err != nil {
return "", err
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
tokenResp := struct {
Token string `json:"token"`
}{}
err = json.Unmarshal(b, &tokenResp)
if err != nil {
return "", err
}
return tokenResp.Token, nil
}
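// DescriptorFromHeader builds an OCI descriptor from the content type, content length, and digest response headers.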
func DescriptorFromHeader(header http.Header) (ocispec.Descriptor, error) {
mediaType := header.Get(httpx.HeaderContentType)
if mediaType == "" {
return ocispec.Descriptor{}, errors.New("content type cannot be empty")
}
contentLength := header.Get(httpx.HeaderContentLength)
if contentLength == "" {
return ocispec.Descriptor{}, errors.New("content length cannot be empty")
}
size, err := strconv.ParseInt(contentLength, 10, 64)
if err != nil {
return ocispec.Descriptor{}, err
}
dgst, err := digest.Parse(header.Get(HeaderDockerDigest))
if err != nil {
return ocispec.Descriptor{}, err
}
desc := ocispec.Descriptor{
MediaType: mediaType,
Size: size,
Digest: dgst,
}
return desc, nil
}
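// WriteDescriptorToHeader writes the descriptor's media type, size, and digest to the response headers.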
func WriteDescriptorToHeader(desc ocispec.Descriptor, header http.Header) {
header.Set(httpx.HeaderContentType, desc.MediaType)
header.Set(httpx.HeaderContentLength, strconv.FormatInt(desc.Size, 10))
header.Set(HeaderDockerDigest, desc.Digest.String())
}

View File

@ -1,116 +0,0 @@
package oci
import (
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"testing"
"cuelabs.dev/go/oci/ociregistry/ocimem"
"cuelabs.dev/go/oci/ociregistry/ociserver"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/stretchr/testify/require"
)
func TestClient(t *testing.T) {
t.Parallel()
img := Image{
Repository: "test/image",
Tag: "latest",
}
mem := ocimem.New()
blobs := []ocispec.Descriptor{
{
MediaType: "application/vnd.oci.image.config.v1+json",
Digest: digest.Digest("sha256:68b8a989a3e08ddbdb3a0077d35c0d0e59c9ecf23d0634584def8bdbb7d6824f"),
Size: 529,
},
{
MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
Digest: digest.Digest("sha256:3caa2469de2a23cbcc209dd0b9d01cd78ff9a0f88741655991d36baede5b0996"),
Size: 118,
},
}
for _, blob := range blobs {
f, err := os.Open(filepath.Join("testdata", "blobs", "sha256", blob.Digest.Encoded()))
require.NoError(t, err)
_, err = mem.PushBlob(t.Context(), img.Repository, blob, f)
f.Close()
require.NoError(t, err)
}
manifests := []ocispec.Descriptor{
{
MediaType: "application/vnd.oci.image.manifest.v1+json",
Digest: digest.Digest("sha256:b6d6089ca6c395fd563c2084f5dd7bc56a2f5e6a81413558c5be0083287a77e9"),
},
}
for _, manifest := range manifests {
b, err := os.ReadFile(filepath.Join("testdata", "blobs", "sha256", manifest.Digest.Encoded()))
require.NoError(t, err)
_, err = mem.PushManifest(t.Context(), img.Repository, img.Tag, b, manifest.MediaType)
require.NoError(t, err)
}
reg := ociserver.New(mem, nil)
srv := httptest.NewServer(reg)
t.Cleanup(func() {
srv.Close()
})
client := NewClient()
mirror, err := url.Parse(srv.URL)
require.NoError(t, err)
pullResults, err := client.Pull(t.Context(), img, WithFetchMirror(mirror))
require.NoError(t, err)
require.Len(t, pullResults, 3)
dist := DistributionPath{
Kind: DistributionKindBlob,
Name: img.Repository,
Digest: blobs[0].Digest,
}
desc, err := client.Head(t.Context(), dist, WithFetchMirror(mirror))
require.NoError(t, err)
require.Equal(t, dist.Digest, desc.Digest)
require.Equal(t, httpx.ContentTypeBinary, desc.MediaType)
}
func TestDescriptorHeader(t *testing.T) {
t.Parallel()
header := http.Header{}
desc := ocispec.Descriptor{
MediaType: "foo",
Size: 909,
Digest: digest.Digest("sha256:b6d6089ca6c395fd563c2084f5dd7bc56a2f5e6a81413558c5be0083287a77e9"),
}
WriteDescriptorToHeader(desc, header)
require.Equal(t, "foo", header.Get(httpx.HeaderContentType))
require.Equal(t, "909", header.Get(httpx.HeaderContentLength))
require.Equal(t, "sha256:b6d6089ca6c395fd563c2084f5dd7bc56a2f5e6a81413558c5be0083287a77e9", header.Get(HeaderDockerDigest))
headerDesc, err := DescriptorFromHeader(header)
require.NoError(t, err)
require.Equal(t, desc, headerDesc)
header = http.Header{}
_, err = DescriptorFromHeader(header)
require.EqualError(t, err, "content type cannot be empty")
header.Set(httpx.HeaderContentType, "test")
_, err = DescriptorFromHeader(header)
require.EqualError(t, err, "content length cannot be empty")
header.Set(httpx.HeaderContentLength, "wrong")
_, err = DescriptorFromHeader(header)
require.EqualError(t, err, "strconv.ParseInt: parsing \"wrong\": invalid syntax")
header.Set(httpx.HeaderContentLength, "250000")
_, err = DescriptorFromHeader(header)
require.EqualError(t, err, "invalid checksum digest format")
header.Set(HeaderDockerDigest, "foobar")
_, err = DescriptorFromHeader(header)
require.EqualError(t, err, "invalid checksum digest format")
}

File diff suppressed because it is too large

View File

@ -1,37 +1,18 @@
package oci
import (
"context"
"fmt"
iofs "io/fs"
"maps"
"net/url"
"os"
"path/filepath"
"testing"
"github.com/containerd/containerd/v2/pkg/filters"
"github.com/go-logr/logr"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)
func TestNewContainerd(t *testing.T) {
t.Parallel()
c, err := NewContainerd("socket", "namespace", "foo", nil)
require.NoError(t, err)
require.Empty(t, c.contentPath)
require.Nil(t, c.client)
require.Equal(t, "foo", c.registryConfigPath)
c, err = NewContainerd("socket", "namespace", "foo", nil, WithContentPath("local"))
require.NoError(t, err)
require.Equal(t, "local", c.contentPath)
}
func TestVerifyStatusResponse(t *testing.T) {
t.Parallel()
tests := []struct {
name string
configPath string
@ -73,13 +54,12 @@ func TestVerifyStatusResponse(t *testing.T) {
expectedErrMsg: "Containerd discard unpacked layers cannot be enabled",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
resp := &runtimeapi.StatusResponse{
Info: map[string]string{
"config": fmt.Sprintf(`{"registry": {"configPath": %q}, "containerd": {"discardUnpackedLayers": %v}}`, tt.configPath, tt.discardUnpackedLayers),
"config": fmt.Sprintf(`{"registry": {"configPath": "%s"}, "containerd": {"runtimes":{"discardUnpackedLayers": %v}}}`, tt.configPath, tt.discardUnpackedLayers),
},
}
err := verifyStatusResponse(resp, tt.requiredConfigPath)
@ -92,278 +72,84 @@ func TestVerifyStatusResponse(t *testing.T) {
}
}
func TestVerifyStatusResponseMissingRequired(t *testing.T) {
t.Parallel()
tests := []struct {
name string
config string
expectedErrMsg string
}{
{
name: "missing discard unpacked layers false",
config: `{"registry": {"configPath": "foo"}, "containerd": {"runtimes":{"discardUnpackedLayers": false}}}`,
expectedErrMsg: "field containerd.discardUnpackedLayers missing from config",
},
{
name: "missing discard unpacked layers true",
config: `{"registry": {"configPath": "foo"}, "containerd": {"runtimes":{"discardUnpackedLayers": true}}}`,
expectedErrMsg: "field containerd.discardUnpackedLayers missing from config",
},
{
name: "missing containerd field",
config: `{"registry": {"configPath": "foo"}}`,
expectedErrMsg: "field containerd.discardUnpackedLayers missing from config",
},
{
name: "missing registry field",
config: `{"containerd": {"discardUnpackedLayers": false}}`,
expectedErrMsg: "field registry.configPath missing from config",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
resp := &runtimeapi.StatusResponse{
Info: map[string]string{
"config": tt.config,
},
}
err := verifyStatusResponse(resp, "foo")
require.EqualError(t, err, tt.expectedErrMsg)
})
}
}
func TestBackupConfig(t *testing.T) {
t.Parallel()
log := logr.Discard()
configPath := t.TempDir()
err := backupConfig(log, configPath)
require.NoError(t, err)
ok, err := dirExists(filepath.Join(configPath, "_backup"))
require.NoError(t, err)
require.True(t, ok)
files, err := os.ReadDir(filepath.Join(configPath, "_backup"))
require.NoError(t, err)
require.Empty(t, files)
configPath = t.TempDir()
err = os.WriteFile(filepath.Join(configPath, "test.txt"), []byte("Hello World"), 0o644)
require.NoError(t, err)
err = backupConfig(log, configPath)
require.NoError(t, err)
ok, err = dirExists(filepath.Join(configPath, "_backup"))
require.NoError(t, err)
require.True(t, ok)
files, err = os.ReadDir(filepath.Join(configPath, "_backup"))
require.NoError(t, err)
require.Len(t, files, 1)
}
func TestParseContentRegistries(t *testing.T) {
t.Parallel()
tests := []struct {
name string
labels map[string]string
expected []string
}{
{
name: "no labels",
labels: map[string]string{},
expected: []string{},
},
{
name: "one matching",
labels: map[string]string{
"containerd.io/distribution.source.docker.io": "library/alpine",
},
expected: []string{"docker.io"},
},
{
name: "multiple matching",
labels: map[string]string{
"containerd.io/distribution.source.example.com": "foo",
"containerd.io/distribution.source.ghcr.io": "spegel-org/spegel",
},
expected: []string{"ghcr.io", "example.com"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
registries := parseContentRegistries(tt.labels)
require.ElementsMatch(t, tt.expected, registries)
})
}
}
func TestFeaturesForVersion(t *testing.T) {
t.Parallel()
tests := []struct {
version string
expectedString string
expectedFeatures []Feature
}{
{
version: "v2.0.2",
expectedFeatures: []Feature{},
expectedString: "",
},
{
version: "2.1.0",
expectedFeatures: []Feature{FeatureContentEvent},
expectedString: "ContentEvent",
},
{
version: "v1.7.27",
expectedFeatures: []Feature{FeatureConfigCheck},
expectedString: "ConfigCheck",
},
{
version: "1.6.0",
expectedFeatures: []Feature{FeatureConfigCheck},
expectedString: "ConfigCheck",
},
}
for _, tt := range tests {
// Testing with a suffix is important as some Linux distributions modify the version
// with a non-SemVer-compliant suffix. Even if the version is supposed to comply with
// SemVer, that may not always be the case.
for _, suffix := range []string{"", "~ds1"} {
version := tt.version + suffix
t.Run(version, func(t *testing.T) {
t.Parallel()
feats, err := featuresForVersion(version)
require.NoError(t, err)
for _, feat := range tt.expectedFeatures {
ok := feats.Has(feat)
require.True(t, ok)
}
require.Equal(t, tt.expectedString, feats.String())
})
}
}
}
func TestCreateFilter(t *testing.T) {
t.Parallel()
tests := []struct {
name string
expectedImageFilter []string
expectedEventFilter []string
expectedContentFilter []string
registries []string
name string
expectedListFilter string
expectedEventFilter string
registries []string
}{
{
name: "with registry filtering",
registries: []string{"https://docker.io", "https://gcr.io"},
expectedImageFilter: []string{`name~="^(docker\\.io|gcr\\.io)/"`},
expectedEventFilter: []string{`topic~="/images/create|/images/delete",event.name~="^(docker\\.io|gcr\\.io)/"`, `topic~="/content/create"`},
expectedContentFilter: []string{`labels."containerd.io/distribution.source.docker.io"~="^."`, `labels."containerd.io/distribution.source.gcr.io"~="^."`},
name: "only registries",
registries: []string{"https://docker.io", "https://gcr.io"},
expectedListFilter: `name~="^(docker\\.io|gcr\\.io)/"`,
expectedEventFilter: `topic~="/images/create|/images/update|/images/delete",event.name~="^(docker\\.io|gcr\\.io)/"`,
},
{
name: "without registry filtering",
registries: []string{},
expectedImageFilter: []string{`name~="^.+/"`},
expectedEventFilter: []string{`topic~="/images/create|/images/delete",event.name~="^.+/"`, `topic~="/content/create"`},
expectedContentFilter: []string{},
name: "additional image filters",
registries: []string{"https://docker.io", "https://gcr.io"},
expectedListFilter: `name~="^(docker\\.io|gcr\\.io)/"`,
expectedEventFilter: `topic~="/images/create|/images/update|/images/delete",event.name~="^(docker\\.io|gcr\\.io)/"`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
imageFilter, eventFilter, contentFilter := createFilters(stringListToUrlList(t, tt.registries))
require.Equal(t, tt.expectedImageFilter, imageFilter)
_, err := filters.ParseAll(imageFilter...)
require.NoError(t, err)
listFilter, eventFilter := createFilters(stringListToUrlList(t, tt.registries))
require.Equal(t, tt.expectedListFilter, listFilter)
require.Equal(t, tt.expectedEventFilter, eventFilter)
_, err = filters.ParseAll(eventFilter...)
require.NoError(t, err)
require.Equal(t, tt.expectedContentFilter, contentFilter)
_, err = filters.ParseAll(contentFilter...)
require.NoError(t, err)
})
}
}
func TestMirrorConfiguration(t *testing.T) {
t.Parallel()
registryConfigPath := "/etc/containerd/certs.d"
tests := []struct {
existingFiles map[string]string
expectedFiles map[string]string
name string
username string
password string
registries []url.URL
mirrors []url.URL
resolveTags bool
createConfigPathDir bool
prependExisting bool
appendToBackup bool
}{
{
name: "multiple mirrors",
resolveTags: true,
registries: stringListToUrlList(t, []string{"http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000", "http://127.0.0.2:5000", "http://127.0.0.1:5001"}),
prependExisting: false,
name: "multiple mirrors",
resolveTags: true,
registries: stringListToUrlList(t, []string{"http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000", "http://127.0.0.1:5001"}),
expectedFiles: map[string]string{
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'
[host.'http://127.0.0.2:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'
[host.'http://127.0.0.1:5001']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
`,
},
},
{
name: "_default registry mirrors",
resolveTags: true,
registries: stringListToUrlList(t, []string{}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
prependExisting: false,
name: "resolve tags disabled",
resolveTags: false,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
expectedFiles: map[string]string{
"_default/hosts.toml": `[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
},
},
{
name: "resolve tags disabled",
resolveTags: false,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
prependExisting: false,
expectedFiles: map[string]string{
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull']
dial_timeout = '200ms'`,
`,
},
},
{
@ -372,18 +158,19 @@ dial_timeout = '200ms'`,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: false,
prependExisting: false,
expectedFiles: map[string]string{
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
`,
},
},
{
@ -392,18 +179,19 @@ dial_timeout = '200ms'`,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: true,
prependExisting: false,
expectedFiles: map[string]string{
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
`,
},
},
{
@ -412,24 +200,25 @@ dial_timeout = '200ms'`,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: true,
prependExisting: false,
existingFiles: map[string]string{
"docker.io/hosts.toml": "hello = 'world'",
"ghcr.io/hosts.toml": "foo = 'bar'",
"/etc/containerd/certs.d/docker.io/hosts.toml": "Hello World",
"/etc/containerd/certs.d/ghcr.io/hosts.toml": "Foo Bar",
},
expectedFiles: map[string]string{
"_backup/docker.io/hosts.toml": "hello = 'world'",
"_backup/ghcr.io/hosts.toml": "foo = 'bar'",
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/_backup/docker.io/hosts.toml": "Hello World",
"/etc/containerd/certs.d/_backup/ghcr.io/hosts.toml": "Foo Bar",
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
`,
},
},
{
@ -438,70 +227,53 @@ dial_timeout = '200ms'`,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: true,
prependExisting: false,
existingFiles: map[string]string{
"_backup/docker.io/hosts.toml": "hello = 'world'",
"_backup/ghcr.io/hosts.toml": "foo = 'bar'",
"test.txt": "test",
"foo": "bar",
"/etc/containerd/certs.d/_backup/docker.io/hosts.toml": "Hello World",
"/etc/containerd/certs.d/_backup/ghcr.io/hosts.toml": "Foo Bar",
"/etc/containerd/certs.d/test.txt": "test",
"/etc/containerd/certs.d/foo": "bar",
},
expectedFiles: map[string]string{
"_backup/docker.io/hosts.toml": "hello = 'world'",
"_backup/ghcr.io/hosts.toml": "foo = 'bar'",
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/_backup/docker.io/hosts.toml": "Hello World",
"/etc/containerd/certs.d/_backup/ghcr.io/hosts.toml": "Foo Bar",
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
`,
},
},
{
name: "prepend to existing configuration",
name: "append to existing configuration",
resolveTags: true,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: true,
prependExisting: true,
appendToBackup: true,
existingFiles: map[string]string{
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://example.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
[host.'http://example.com:30021']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
capabilities = ['pull', 'resolve']
[host.'http://bar.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']`,
`,
},
expectedFiles: map[string]string{
"_backup/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host.'http://example.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
[host.'http://example.com:30021']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
capabilities = ['pull', 'resolve']
[host.'http://bar.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']`,
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'
"/etc/containerd/certs.d/_backup/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://example.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
@ -509,267 +281,87 @@ client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
[host.'http://example.com:30021']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
`,
"/etc/containerd/certs.d/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host.'http://bar.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
},
},
{
name: "prepend existing disabled",
resolveTags: true,
registries: stringListToUrlList(t, []string{"https://docker.io", "http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000"}),
createConfigPathDir: true,
prependExisting: false,
existingFiles: map[string]string{
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host.'http://example.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
capabilities = ['pull', 'resolve']
[host.'http://example.com:30021']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
capabilities = ['pull', 'resolve']
`,
"/etc/containerd/certs.d/foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host.'http://bar.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']`,
},
expectedFiles: map[string]string{
"_backup/docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host.'http://example.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
[host.'http://example.com:30021']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']
capabilities = ['pull', 'resolve']
[host.'http://bar.com:30020']
capabilities = ['pull', 'resolve']
client = ['/etc/certs/xxx/client.cert', '/etc/certs/xxx/client.key']`,
"docker.io/hosts.toml": `server = 'https://registry-1.docker.io'
[host]
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'`,
},
},
{
name: "with basic authentication",
resolveTags: true,
registries: stringListToUrlList(t, []string{"http://foo.bar:5000"}),
mirrors: stringListToUrlList(t, []string{"http://127.0.0.1:5000", "http://127.0.0.1:5001"}),
prependExisting: false,
username: "hello",
password: "world",
expectedFiles: map[string]string{
"foo.bar:5000/hosts.toml": `server = 'http://foo.bar:5000'
[host.'http://127.0.0.1:5000']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'
[host.'http://127.0.0.1:5000'.header]
Authorization = 'Basic aGVsbG86d29ybGQ='
[host.'http://127.0.0.1:5001']
capabilities = ['pull', 'resolve']
dial_timeout = '200ms'
[host.'http://127.0.0.1:5001'.header]
Authorization = 'Basic aGVsbG86d29ybGQ='`,
`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
registryConfigPath := filepath.Join(t.TempDir(), "etc", "containerd", "certs.d")
fs := afero.NewMemMapFs()
if tt.createConfigPathDir {
err := os.MkdirAll(registryConfigPath, 0o755)
err := fs.Mkdir(registryConfigPath, 0755)
require.NoError(t, err)
}
for k, v := range tt.existingFiles {
path := filepath.Join(registryConfigPath, k)
err := os.MkdirAll(filepath.Dir(path), 0o755)
require.NoError(t, err)
err = os.WriteFile(path, []byte(v), 0o644)
err := afero.WriteFile(fs, k, []byte(v), 0644)
require.NoError(t, err)
}
err := AddMirrorConfiguration(t.Context(), registryConfigPath, tt.registries, tt.mirrors, tt.resolveTags, tt.prependExisting, tt.username, tt.password)
err := AddMirrorConfiguration(context.TODO(), fs, registryConfigPath, tt.registries, tt.mirrors, tt.resolveTags, tt.appendToBackup)
require.NoError(t, err)
ok, err := dirExists(filepath.Join(registryConfigPath, "_backup"))
require.NoError(t, err)
require.True(t, ok)
seenExpectedFiles := maps.Clone(tt.expectedFiles)
err = filepath.Walk(registryConfigPath, func(path string, fi iofs.FileInfo, _ error) error {
if len(tt.existingFiles) == 0 {
ok, err := afero.DirExists(fs, "/etc/containerd/certs.d/_backup")
require.NoError(t, err)
require.False(t, ok)
}
err = afero.Walk(fs, registryConfigPath, func(path string, fi iofs.FileInfo, _ error) error {
if fi.IsDir() {
return nil
}
relPath, err := filepath.Rel(registryConfigPath, path)
require.NoError(t, err)
expectedContent, ok := tt.expectedFiles[relPath]
require.True(t, ok)
delete(seenExpectedFiles, relPath)
b, err := os.ReadFile(path)
expectedContent, ok := tt.expectedFiles[path]
require.True(t, ok, path)
b, err := afero.ReadFile(fs, path)
require.NoError(t, err)
require.Equal(t, expectedContent, string(b))
return nil
})
require.NoError(t, err)
require.Empty(t, seenExpectedFiles)
})
}
}
func TestMirrorConfigurationInvalidMirrorURL(t *testing.T) {
t.Parallel()
configPath := filepath.Join(t.TempDir(), "etc", "containerd", "certs.d")
fs := afero.NewMemMapFs()
mirrors := stringListToUrlList(t, []string{"http://127.0.0.1:5000"})
registries := stringListToUrlList(t, []string{"ftp://docker.io"})
err := AddMirrorConfiguration(t.Context(), configPath, registries, mirrors, true, false, "", "")
err := AddMirrorConfiguration(context.TODO(), fs, "/etc/containerd/certs.d", registries, mirrors, true, false)
require.EqualError(t, err, "invalid registry url scheme must be http or https: ftp://docker.io")
registries = stringListToUrlList(t, []string{"https://docker.io/foo/bar"})
err = AddMirrorConfiguration(t.Context(), configPath, registries, mirrors, true, false, "", "")
err = AddMirrorConfiguration(context.TODO(), fs, "/etc/containerd/certs.d", registries, mirrors, true, false)
require.EqualError(t, err, "invalid registry url path has to be empty: https://docker.io/foo/bar")
registries = stringListToUrlList(t, []string{"https://docker.io?foo=bar"})
err = AddMirrorConfiguration(t.Context(), configPath, registries, mirrors, true, false, "", "")
err = AddMirrorConfiguration(context.TODO(), fs, "/etc/containerd/certs.d", registries, mirrors, true, false)
require.EqualError(t, err, "invalid registry url query has to be empty: https://docker.io?foo=bar")
registries = stringListToUrlList(t, []string{"https://foo@docker.io"})
err = AddMirrorConfiguration(t.Context(), configPath, registries, mirrors, true, false, "", "")
err = AddMirrorConfiguration(context.TODO(), fs, "/etc/containerd/certs.d", registries, mirrors, true, false)
require.EqualError(t, err, "invalid registry url user has to be empty: https://foo@docker.io")
}
func TestExistingHosts(t *testing.T) {
t.Parallel()
configPath := t.TempDir()
u, err := url.Parse("https://ghcr.io")
require.NoError(t, err)
eh, err := existingHosts(configPath, *u)
require.NoError(t, err)
require.Empty(t, eh)
tomlHosts := `server = "https://registry-1.docker.io"
[host."https://mirror.registry"]
capabilities = ["pull"]
ca = "/etc/certs/mirror.pem"
skip_verify = false
[host."https://mirror.registry".header]
x-custom-2 = ["value1", "value2"]
[host]
[host."https://mirror-bak.registry/us"]
capabilities = ["pull"]
skip_verify = true
[host."http://mirror.registry"]
capabilities = ["pull"]
[host."https://test-3.registry"]
client = ["/etc/certs/client-1.pem", "/etc/certs/client-2.pem"]
[host."https://test-2.registry".header]
x-custom-2 = ["foo"]
[host."https://test-1.registry"]
capabilities = ["pull", "resolve", "push"]
ca = ["/etc/certs/test-1-ca.pem", "/etc/certs/special.pem"]
client = [["/etc/certs/client.cert", "/etc/certs/client.key"],["/etc/certs/client.pem", ""]]
[host."https://test-2.registry"]
client = "/etc/certs/client.pem"
[host."https://non-compliant-mirror.registry/v2/upstream"]
capabilities = ["pull"]
override_path = true`
err = os.MkdirAll(filepath.Join(configPath, backupDir, u.Host), 0o755)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(configPath, backupDir, u.Host, "hosts.toml"), []byte(tomlHosts), 0o644)
require.NoError(t, err)
eh, err = existingHosts(configPath, *u)
require.NoError(t, err)
expected := `[host.'https://mirror.registry']
ca = '/etc/certs/mirror.pem'
capabilities = ['pull']
skip_verify = false
[host.'https://mirror.registry'.header]
x-custom-2 = ['value1', 'value2']
[host.'https://mirror-bak.registry/us']
capabilities = ['pull']
skip_verify = true
[host.'http://mirror.registry']
capabilities = ['pull']
[host.'https://test-3.registry']
client = ['/etc/certs/client-1.pem', '/etc/certs/client-2.pem']
[host.'https://test-1.registry']
ca = ['/etc/certs/test-1-ca.pem', '/etc/certs/special.pem']
capabilities = ['pull', 'resolve', 'push']
client = [['/etc/certs/client.cert', '/etc/certs/client.key'], ['/etc/certs/client.pem', '']]
[host.'https://test-2.registry']
client = '/etc/certs/client.pem'
[host.'https://test-2.registry'.header]
x-custom-2 = ['foo']
[host.'https://non-compliant-mirror.registry/v2/upstream']
capabilities = ['pull']
override_path = true`
require.Equal(t, expected, eh)
}
func TestCleanupMirrorConfiguration(t *testing.T) {
t.Parallel()
configPath := filepath.Join(t.TempDir(), "certs.d")
err := os.MkdirAll(filepath.Join(configPath, "_backup"), 0o755)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(configPath, backupDir, "data.txt"), []byte("hello world"), 0o644)
require.NoError(t, err)
err = os.WriteFile(filepath.Join(configPath, "foo.bin"), []byte("hello world"), 0o644)
require.NoError(t, err)
err = os.MkdirAll(filepath.Join(configPath, "docker.io"), 0o755)
require.NoError(t, err)
for range 2 {
err = CleanupMirrorConfiguration(t.Context(), configPath)
require.NoError(t, err)
files, err := os.ReadDir(configPath)
require.NoError(t, err)
require.Len(t, files, 1)
require.Equal(t, "data.txt", files[0].Name())
}
}
func stringListToUrlList(t *testing.T, list []string) []url.URL {
t.Helper()
urls := []url.URL{}
for _, item := range list {
u, err := url.Parse(item)


@ -1,111 +0,0 @@
package oci
import (
"errors"
"fmt"
"net/url"
"regexp"
"github.com/opencontainers/go-digest"
)
var (
nameRegex = regexp.MustCompile(`([a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*)`)
tagRegex = regexp.MustCompile(`([a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})`)
manifestRegexTag = regexp.MustCompile(`/v2/` + nameRegex.String() + `/manifests/` + tagRegex.String() + `$`)
manifestRegexDigest = regexp.MustCompile(`/v2/` + nameRegex.String() + `/manifests/(.*)`)
blobsRegexDigest = regexp.MustCompile(`/v2/` + nameRegex.String() + `/blobs/(.*)`)
)
// DistributionKind represents the kind of content.
type DistributionKind string
const (
DistributionKindManifest = "manifests"
DistributionKindBlob = "blobs"
)
// DistributionPath contains the individual parameters from an OCI distribution spec request.
type DistributionPath struct {
Kind DistributionKind
Name string
Digest digest.Digest
Tag string
Registry string
}
// Reference returns the digest if it is set, otherwise the full image reference with the tag.
func (d DistributionPath) Reference() string {
if d.Digest != "" {
return d.Digest.String()
}
return fmt.Sprintf("%s/%s:%s", d.Registry, d.Name, d.Tag)
}
// IsLatestTag returns true if the tag has the value latest.
func (d DistributionPath) IsLatestTag() bool {
return d.Tag == "latest"
}
// URL returns the reconstructed URL containing the path and query parameters.
func (d DistributionPath) URL() *url.URL {
ref := d.Digest.String()
if ref == "" {
ref = d.Tag
}
return &url.URL{
Scheme: "https",
Host: d.Registry,
Path: fmt.Sprintf("/v2/%s/%s/%s", d.Name, d.Kind, ref),
RawQuery: fmt.Sprintf("ns=%s", d.Registry),
}
}
// ParseDistributionPath gets the parameters from a URL which conforms with the OCI distribution spec.
// It returns a distribution path which contains all the individual parameters.
// https://github.com/opencontainers/distribution-spec/blob/main/spec.md
func ParseDistributionPath(u *url.URL) (DistributionPath, error) {
registry := u.Query().Get("ns")
comps := manifestRegexTag.FindStringSubmatch(u.Path)
if len(comps) == 6 {
if registry == "" {
return DistributionPath{}, errors.New("registry parameter needs to be set for tag references")
}
dist := DistributionPath{
Kind: DistributionKindManifest,
Name: comps[1],
Tag: comps[5],
Registry: registry,
}
return dist, nil
}
comps = manifestRegexDigest.FindStringSubmatch(u.Path)
if len(comps) == 6 {
dgst, err := digest.Parse(comps[5])
if err != nil {
return DistributionPath{}, err
}
dist := DistributionPath{
Kind: DistributionKindManifest,
Name: comps[1],
Digest: dgst,
Registry: registry,
}
return dist, nil
}
comps = blobsRegexDigest.FindStringSubmatch(u.Path)
if len(comps) == 6 {
dgst, err := digest.Parse(comps[5])
if err != nil {
return DistributionPath{}, err
}
dist := DistributionPath{
Kind: DistributionKindBlob,
Name: comps[1],
Digest: dgst,
Registry: registry,
}
return dist, nil
}
return DistributionPath{}, errors.New("distribution path could not be parsed")
}
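For context, a minimal sketch of how ParseDistributionPath could be exercised on a mirrored pull request. This is an illustrative example, not part of the diff; it assumes the package is importable as github.com/spegel-org/spegel/pkg/oci, as in the imports shown elsewhere in this change.

package main

import (
	"fmt"
	"net/url"

	"github.com/spegel-org/spegel/pkg/oci"
)

func main() {
	// A mirrored pull request as containerd would issue it, with the upstream
	// registry carried in the ns query parameter.
	u, err := url.Parse("http://localhost:5000/v2/library/nginx/manifests/latest?ns=docker.io")
	if err != nil {
		panic(err)
	}
	dist, err := oci.ParseDistributionPath(u)
	if err != nil {
		panic(err)
	}
	fmt.Println(dist.Kind)          // manifests
	fmt.Println(dist.Reference())   // docker.io/library/nginx:latest
	fmt.Println(dist.IsLatestTag()) // true
}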


@ -1,149 +0,0 @@
package oci
import (
"fmt"
"net/url"
"testing"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/require"
)
func TestParseDistributionPath(t *testing.T) {
t.Parallel()
tests := []struct {
name string
registry string
path string
expectedName string
expectedDgst digest.Digest
expectedTag string
expectedRef string
expectedKind DistributionKind
expectedIsLatestTag bool
}{
{
name: "manifest tag",
registry: "example.com",
path: "/v2/foo/bar/manifests/hello-world",
expectedName: "foo/bar",
expectedDgst: "",
expectedTag: "hello-world",
expectedRef: "example.com/foo/bar:hello-world",
expectedKind: DistributionKindManifest,
expectedIsLatestTag: false,
},
{
name: "manifest with latest tag",
registry: "example.com",
path: "/v2/test/manifests/latest",
expectedName: "test",
expectedDgst: "",
expectedTag: "latest",
expectedRef: "example.com/test:latest",
expectedKind: DistributionKindManifest,
expectedIsLatestTag: true,
},
{
name: "manifest digest",
registry: "docker.io",
path: "/v2/library/nginx/manifests/sha256:0a404ca8e119d061cdb2dceee824c914cdc69b31bc7b5956ef5a520436a80d39",
expectedName: "library/nginx",
expectedDgst: digest.Digest("sha256:0a404ca8e119d061cdb2dceee824c914cdc69b31bc7b5956ef5a520436a80d39"),
expectedTag: "",
expectedRef: "sha256:0a404ca8e119d061cdb2dceee824c914cdc69b31bc7b5956ef5a520436a80d39",
expectedKind: DistributionKindManifest,
expectedIsLatestTag: false,
},
{
name: "blob digest",
registry: "docker.io",
path: "/v2/library/nginx/blobs/sha256:295c7be079025306c4f1d65997fcf7adb411c88f139ad1d34b537164aa060369",
expectedName: "library/nginx",
expectedDgst: digest.Digest("sha256:295c7be079025306c4f1d65997fcf7adb411c88f139ad1d34b537164aa060369"),
expectedTag: "",
expectedRef: "sha256:295c7be079025306c4f1d65997fcf7adb411c88f139ad1d34b537164aa060369",
expectedKind: DistributionKindBlob,
expectedIsLatestTag: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
u := &url.URL{
Path: tt.path,
RawQuery: fmt.Sprintf("ns=%s", tt.registry),
}
dist, err := ParseDistributionPath(u)
require.NoError(t, err)
require.Equal(t, tt.expectedName, dist.Name)
require.Equal(t, tt.expectedDgst, dist.Digest)
require.Equal(t, tt.expectedTag, dist.Tag)
require.Equal(t, tt.expectedRef, dist.Reference())
require.Equal(t, tt.expectedKind, dist.Kind)
require.Equal(t, tt.registry, dist.Registry)
require.Equal(t, tt.path, dist.URL().Path)
require.Equal(t, tt.registry, dist.URL().Query().Get("ns"))
require.Equal(t, tt.expectedIsLatestTag, dist.IsLatestTag())
})
}
}
func TestParseDistributionPathErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
url *url.URL
expectedError string
}{
{
name: "invalid path",
url: &url.URL{
Path: "/v2/spegel-org/spegel/v0.0.1",
RawQuery: "ns=example.com",
},
expectedError: "distribution path could not be parsed",
},
{
name: "blob with tag reference",
url: &url.URL{
Path: "/v2/spegel-org/spegel/blobs/v0.0.1",
RawQuery: "ns=example.com",
},
expectedError: "invalid checksum digest format",
},
{
name: "blob with invalid digest",
url: &url.URL{
Path: "/v2/spegel-org/spegel/blobs/sha256:123",
RawQuery: "ns=example.com",
},
expectedError: "invalid checksum digest length",
},
{
name: "manifest tag with missing registry",
url: &url.URL{
Path: "/v2/spegel-org/spegel/manifests/v0.0.1",
},
expectedError: "registry parameter needs to be set for tag references",
},
{
name: "manifest with invalid digest",
url: &url.URL{
Path: "/v2/spegel-org/spegel/manifests/sha253:foobar",
},
expectedError: "unsupported digest algorithm",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
_, err := ParseDistributionPath(tt.url)
require.EqualError(t, err, tt.expectedError)
})
}
}


@ -1,7 +1,6 @@
package oci
import (
"errors"
"fmt"
"net/url"
"regexp"
@ -11,25 +10,42 @@ import (
)
type Image struct {
Name string
Registry string
Repository string
Tag string
Digest digest.Digest
}
func NewImage(registry, repository, tag string, dgst digest.Digest) (Image, error) {
type EventType string
const (
CreateEvent EventType = "CREATE"
UpdateEvent EventType = "UPDATE"
DeleteEvent EventType = "DELETE"
UnknownEvent EventType = ""
)
type ImageEvent struct {
Image Image
Type EventType
}
func NewImage(name, registry, repository, tag string, dgst digest.Digest) (Image, error) {
if name == "" {
return Image{}, fmt.Errorf("image needs to contain a name")
}
if registry == "" {
return Image{}, errors.New("image needs to contain a registry")
return Image{}, fmt.Errorf("image needs to contain a registry")
}
if repository == "" {
return Image{}, errors.New("image needs to contain a repository")
return Image{}, fmt.Errorf("image needs to repository a digest")
}
if dgst != "" {
if err := dgst.Validate(); err != nil {
return Image{}, err
}
if dgst == "" {
return Image{}, fmt.Errorf("image needs to contain a digest")
}
return Image{
Name: name,
Registry: registry,
Repository: repository,
Tag: tag,
@ -44,13 +60,9 @@ func (i Image) IsLatestTag() bool {
func (i Image) String() string {
tag := ""
if i.Tag != "" {
tag = ":" + i.Tag
tag = fmt.Sprintf(":%s", i.Tag)
}
digest := ""
if i.Digest != "" {
digest = "@" + i.Digest.String()
}
return fmt.Sprintf("%s/%s%s%s", i.Registry, i.Repository, tag, digest)
return fmt.Sprintf("%s/%s%s@%s", i.Registry, i.Repository, tag, i.Digest.String())
}
func (i Image) TagName() (string, bool) {
@ -62,19 +74,19 @@ func (i Image) TagName() (string, bool) {
var splitRe = regexp.MustCompile(`[:@]`)
func ParseImage(s string) (Image, error) {
func Parse(s string, extraDgst digest.Digest) (Image, error) {
if strings.Contains(s, "://") {
return Image{}, errors.New("invalid reference")
return Image{}, fmt.Errorf("invalid reference")
}
u, err := url.Parse("dummy://" + s)
if err != nil {
return Image{}, err
}
if u.Scheme != "dummy" {
return Image{}, errors.New("invalid reference")
return Image{}, fmt.Errorf("invalid reference")
}
if u.Host == "" {
return Image{}, errors.New("hostname required")
return Image{}, fmt.Errorf("hostname required")
}
var object string
if idx := splitRe.FindStringIndex(u.Path); idx != nil {
@ -90,33 +102,19 @@ func ParseImage(s string) (Image, error) {
tag, _, _ = strings.Cut(tag, "@")
repository := strings.TrimPrefix(u.Path, "/")
img, err := NewImage(u.Host, repository, tag, dgst)
if dgst == "" {
dgst = extraDgst
}
if extraDgst != "" && dgst != extraDgst {
return Image{}, fmt.Errorf("invalid digest set does not match parsed digest: %v %v", s, dgst)
}
img, err := NewImage(s, u.Host, repository, tag, dgst)
if err != nil {
return Image{}, err
}
return img, nil
}
func ParseImageRequireDigest(s string, dgst digest.Digest) (Image, error) {
img, err := ParseImage(s)
if err != nil {
return Image{}, err
}
if img.Digest != "" && dgst == "" {
return img, nil
}
if img.Digest == "" && dgst == "" {
return Image{}, errors.New("image needs to contain a digest")
}
if img.Digest == "" && dgst != "" {
return NewImage(img.Registry, img.Repository, img.Tag, dgst)
}
if img.Digest != dgst {
return Image{}, fmt.Errorf("invalid digest set does not match parsed digest: %v %v", s, img.Digest)
}
return img, nil
}
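A brief sketch of how ParseImageRequireDigest could be used when the reference only carries a tag and the digest is supplied separately, as containerd reports images. Illustrative only; the import path and the digest value are assumptions, not taken from this diff.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/spegel-org/spegel/pkg/oci"
)

func main() {
	// Digest supplied out of band; the reference itself only carries a tag.
	dgst := digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda")
	img, err := oci.ParseImageRequireDigest("docker.io/library/ubuntu:22.04", dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(img.String()) // docker.io/library/ubuntu:22.04@sha256:c0669e...
	tagName, ok := img.TagName()
	fmt.Println(tagName, ok) // docker.io/library/ubuntu:22.04 true
}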
func splitObject(obj string) (tag string, dgst digest.Digest) {
parts := strings.SplitAfterN(obj, "@", 2)
if len(parts) < 2 {


@ -8,17 +8,13 @@ import (
"github.com/stretchr/testify/require"
)
func TestParseImageRequireDigest(t *testing.T) {
t.Parallel()
func TestParseImage(t *testing.T) {
tests := []struct {
name string
image string
expectedRepository string
expectedTag string
expectedString string
expectedDigest digest.Digest
expectedIsLatest bool
digestInImage bool
}{
{
@ -28,8 +24,6 @@ func TestParseImageRequireDigest(t *testing.T) {
expectedRepository: "library/ubuntu",
expectedTag: "latest",
expectedDigest: digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda"),
expectedIsLatest: true,
expectedString: "library/ubuntu:latest@sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda",
},
{
name: "Only tag",
@ -38,8 +32,6 @@ func TestParseImageRequireDigest(t *testing.T) {
expectedRepository: "library/alpine",
expectedTag: "3.18.0",
expectedDigest: digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda"),
expectedIsLatest: false,
expectedString: "library/alpine:3.18.0@sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda",
},
{
name: "Tag and digest",
@ -48,8 +40,6 @@ func TestParseImageRequireDigest(t *testing.T) {
expectedRepository: "jetstack/cert-manager-controller",
expectedTag: "3.18.0",
expectedDigest: digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda"),
expectedIsLatest: false,
expectedString: "jetstack/cert-manager-controller:3.18.0@sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda",
},
{
name: "Only digest",
@ -58,27 +48,14 @@ func TestParseImageRequireDigest(t *testing.T) {
expectedRepository: "fluxcd/helm-controller",
expectedTag: "",
expectedDigest: digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda"),
expectedIsLatest: false,
expectedString: "fluxcd/helm-controller@sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda",
},
{
name: "Digest only in extra digest",
image: "foo/bar",
digestInImage: false,
expectedRepository: "foo/bar",
expectedDigest: digest.Digest("sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda"),
expectedIsLatest: false,
expectedString: "foo/bar@sha256:c0669ef34cdc14332c0f1ab0c2c01acb91d96014b172f1a76f3a39e63d1f0bda",
},
}
registries := []string{"docker.io", "quay.io", "ghcr.com", "127.0.0.1"}
for _, registry := range registries {
for _, tt := range tests {
t.Run(fmt.Sprintf("%s_%s", tt.name, registry), func(t *testing.T) {
t.Parallel()
for _, extraDgst := range []string{tt.expectedDigest.String(), ""} {
img, err := ParseImageRequireDigest(fmt.Sprintf("%s/%s", registry, tt.image), digest.Digest(extraDgst))
img, err := Parse(fmt.Sprintf("%s/%s", registry, tt.image), digest.Digest(extraDgst))
if !tt.digestInImage && extraDgst == "" {
require.EqualError(t, err, "image needs to contain a digest")
continue
@ -88,109 +65,19 @@ func TestParseImageRequireDigest(t *testing.T) {
require.Equal(t, tt.expectedRepository, img.Repository)
require.Equal(t, tt.expectedTag, img.Tag)
require.Equal(t, tt.expectedDigest, img.Digest)
require.Equal(t, tt.expectedIsLatest, img.IsLatestTag())
tagName, ok := img.TagName()
if tt.expectedTag == "" {
require.False(t, ok)
require.Empty(t, tagName)
} else {
require.True(t, ok)
require.Equal(t, registry+"/"+tt.expectedRepository+":"+tt.expectedTag, tagName)
}
require.Equal(t, fmt.Sprintf("%s/%s", registry, tt.expectedString), img.String())
}
})
}
}
}
func TestParseImageRequireDigestErrors(t *testing.T) {
t.Parallel()
tests := []struct {
name string
s string
dgst digest.Digest
expectedError string
}{
{
name: "digests do not match",
s: "quay.io/jetstack/cert-manager-webhook@sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb",
dgst: digest.Digest("sha256:ec4306b243d98cce7c3b1f994f2dae660059ef521b2b24588cfdc950bd816d4c"),
expectedError: "invalid digest set does not match parsed digest: quay.io/jetstack/cert-manager-webhook@sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb",
},
{
name: "no tag or digest",
s: "ghcr.io/spegel-org/spegel",
dgst: "",
expectedError: "image needs to contain a digest",
},
{
name: "reference contains protocol",
s: "https://example.com/test:latest",
dgst: "",
expectedError: "invalid reference",
},
{
name: "unparsable url",
s: "example%#$.com/foo",
dgst: "",
expectedError: "parse \"dummy://example%\": invalid URL escape \"%\"",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
_, err := ParseImageRequireDigest(tt.s, tt.dgst)
require.EqualError(t, err, tt.expectedError)
})
}
func TestParseImageDigestDoesNotMatch(t *testing.T) {
_, err := Parse("quay.io/jetstack/cert-manager-webhook@sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb", digest.Digest("sha256:ec4306b243d98cce7c3b1f994f2dae660059ef521b2b24588cfdc950bd816d4c"))
require.EqualError(t, err, "invalid digest set does not match parsed digest: quay.io/jetstack/cert-manager-webhook@sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb sha256:13fd9eaadb4e491ef0e1d82de60cb199f5ad2ea5a3f8e0c19fdf31d91175b9cb")
}
func TestNewImageErrors(t *testing.T) {
t.Parallel()
// TODO (phillebaba): Add test case for no digest or tag. One needs to be set.
tests := []struct {
name string
registry string
repository string
tag string
dgst digest.Digest
expectedError string
}{
{
name: "missing registry",
registry: "",
repository: "foo/bar",
tag: "latest",
dgst: digest.Digest("sha256:ec4306b243d98cce7c3b1f994f2dae660059ef521b2b24588cfdc950bd816d4c"),
expectedError: "image needs to contain a registry",
},
{
name: "missing repository",
registry: "example.com",
repository: "",
tag: "latest",
dgst: digest.Digest("sha256:ec4306b243d98cce7c3b1f994f2dae660059ef521b2b24588cfdc950bd816d4c"),
expectedError: "image needs to contain a repository",
},
{
name: "invalid digest",
registry: "example.com",
repository: "foo/bar",
tag: "latest",
dgst: digest.Digest("test"),
expectedError: "invalid checksum digest format",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
_, err := NewImage(tt.registry, tt.repository, tt.tag, tt.dgst)
require.EqualError(t, err, tt.expectedError)
})
}
func TestParseImageNoTagOrDigest(t *testing.T) {
_, err := Parse("ghcr.io/spegel-org/spegel", digest.Digest(""))
require.EqualError(t, err, "image needs to contain a digest")
}


@ -1,133 +0,0 @@
package oci
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"sync"
"github.com/opencontainers/go-digest"
)
var _ Store = &Memory{}
type Memory struct {
blobs map[digest.Digest][]byte
tags map[string]digest.Digest
images []Image
mx sync.RWMutex
}
func NewMemory() *Memory {
return &Memory{
images: []Image{},
tags: map[string]digest.Digest{},
blobs: map[digest.Digest][]byte{},
}
}
func (m *Memory) Name() string {
return "memory"
}
func (m *Memory) Verify(ctx context.Context) error {
return nil
}
func (m *Memory) Subscribe(ctx context.Context) (<-chan OCIEvent, error) {
return nil, nil
}
func (m *Memory) ListImages(ctx context.Context) ([]Image, error) {
m.mx.RLock()
defer m.mx.RUnlock()
return m.images, nil
}
func (m *Memory) Resolve(ctx context.Context, ref string) (digest.Digest, error) {
m.mx.RLock()
defer m.mx.RUnlock()
dgst, ok := m.tags[ref]
if !ok {
return "", fmt.Errorf("could not resolve tag %s to a digest", ref)
}
return dgst, nil
}
func (m *Memory) ListContents(ctx context.Context) ([]Content, error) {
m.mx.RLock()
defer m.mx.RUnlock()
contents := []Content{}
for k := range m.blobs {
contents = append(contents, Content{Digest: k})
}
return contents, nil
}
func (m *Memory) Size(ctx context.Context, dgst digest.Digest) (int64, error) {
m.mx.RLock()
defer m.mx.RUnlock()
b, ok := m.blobs[dgst]
if !ok {
return 0, errors.Join(ErrNotFound, fmt.Errorf("size information for digest %s not found", dgst))
}
return int64(len(b)), nil
}
func (m *Memory) GetManifest(ctx context.Context, dgst digest.Digest) ([]byte, string, error) {
m.mx.RLock()
defer m.mx.RUnlock()
b, ok := m.blobs[dgst]
if !ok {
return nil, "", errors.Join(ErrNotFound, fmt.Errorf("manifest with digest %s not found", dgst))
}
mt, err := DetermineMediaType(b)
if err != nil {
return nil, "", err
}
return b, mt, nil
}
func (m *Memory) GetBlob(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
m.mx.RLock()
defer m.mx.RUnlock()
b, ok := m.blobs[dgst]
if !ok {
return nil, errors.Join(ErrNotFound, fmt.Errorf("blob with digest %s not found", dgst))
}
rc := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
return struct {
io.ReadSeeker
io.Closer
}{
ReadSeeker: rc,
Closer: io.NopCloser(nil),
}, nil
}
func (m *Memory) AddImage(img Image) {
m.mx.Lock()
defer m.mx.Unlock()
m.images = append(m.images, img)
tagName, ok := img.TagName()
if !ok {
return
}
m.tags[tagName] = img.Digest
}
func (m *Memory) AddBlob(b []byte, dgst digest.Digest) {
m.mx.Lock()
defer m.mx.Unlock()
m.blobs[dgst] = b
}
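A small sketch of how the in-memory store could be used on its own, for example in tests. The manifest bytes below are made up for illustration; only NewMemory, AddBlob, Size, and GetManifest from the code above are exercised.

package main

import (
	"context"
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/spegel-org/spegel/pkg/oci"
)

func main() {
	ctx := context.Background()
	store := oci.NewMemory()

	// Add a blob and read its size back through the Store interface.
	manifest := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", "config": {}, "layers": []}`)
	dgst := digest.FromBytes(manifest)
	store.AddBlob(manifest, dgst)

	size, err := store.Size(ctx, dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // length of the manifest bytes

	// GetManifest returns the raw bytes together with the detected media type.
	b, mediaType, err := store.GetManifest(ctx, dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b), mediaType)
}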

pkg/oci/mock.go (new file, 60 lines added)

@ -0,0 +1,60 @@
package oci
import (
"context"
"io"
"github.com/opencontainers/go-digest"
)
var _ Client = &MockClient{}
type MockClient struct {
images []Image
}
func NewMockClient(images []Image) *MockClient {
return &MockClient{
images: images,
}
}
func (m *MockClient) Name() string {
return "mock"
}
func (m *MockClient) Verify(ctx context.Context) error {
return nil
}
func (m *MockClient) Subscribe(ctx context.Context) (<-chan ImageEvent, <-chan error) {
return nil, nil
}
func (m *MockClient) ListImages(ctx context.Context) ([]Image, error) {
return m.images, nil
}
func (m *MockClient) AllIdentifiers(ctx context.Context, img Image) ([]string, error) {
return []string{img.Digest.String()}, nil
}
func (m *MockClient) Resolve(ctx context.Context, ref string) (digest.Digest, error) {
return "", nil
}
func (m *MockClient) Size(ctx context.Context, dgst digest.Digest) (int64, error) {
return 0, nil
}
func (m *MockClient) GetManifest(ctx context.Context, dgst digest.Digest) ([]byte, string, error) {
return nil, "", nil
}
func (m *MockClient) GetBlob(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) {
return nil, nil
}
func (m *MockClient) CopyLayer(ctx context.Context, dgst digest.Digest, dst io.Writer) error {
return nil
}


@ -2,171 +2,25 @@ package oci
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/containerd/containerd/v2/core/images"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
ErrNotFound = errors.New("content not found")
)
type EventType string
const (
CreateEvent EventType = "CREATE"
DeleteEvent EventType = "DELETE"
)
type OCIEvent struct {
Type EventType
Key string
}
type Content struct {
Digest digest.Digest
Registries []string
}
type Store interface {
// Name returns the name of the store implementation.
Name() string
// Verify checks that all expected configuration is set.
Verify(ctx context.Context) error
// Subscribe notifies about any image events occurring in the store backend.
Subscribe(ctx context.Context) (<-chan OCIEvent, error)
// ListImages returns a list of all local images.
ListImages(ctx context.Context) ([]Image, error)
// Resolve returns the digest for the tagged image name reference.
// The ref is expected to be in the format `registry/name:tag`.
Resolve(ctx context.Context, ref string) (digest.Digest, error)
// ListContents returns a list of all the contents.
ListContents(ctx context.Context) ([]Content, error)
// Size returns the content byte size for the given digest.
// Will return ErrNotFound if the digest cannot be found.
Size(ctx context.Context, dgst digest.Digest) (int64, error)
// GetManifest returns the manifest content for the given digest.
// Will return ErrNotFound if the digest cannot be found.
GetManifest(ctx context.Context, dgst digest.Digest) ([]byte, string, error)
// GetBlob returns a stream of the blob content for the given digest.
// Will return ErrNotFound if the digest cannot be found.
GetBlob(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error)
}
type UnknownDocument struct {
MediaType string `json:"mediaType"`
specs.Versioned
MediaType string `json:"mediaType,omitempty"`
}
func DetermineMediaType(b []byte) (string, error) {
var ud UnknownDocument
if err := json.Unmarshal(b, &ud); err != nil {
return "", err
}
if ud.SchemaVersion == 2 && ud.MediaType != "" {
return ud.MediaType, nil
}
data := map[string]json.RawMessage{}
if err := json.Unmarshal(b, &data); err != nil {
return "", err
}
_, architectureOk := data["architecture"]
_, osOk := data["os"]
_, rootfsOk := data["rootfs"]
if architectureOk && osOk && rootfsOk {
return ocispec.MediaTypeImageConfig, nil
}
_, manifestsOk := data["manifests"]
if ud.SchemaVersion == 2 && manifestsOk {
return ocispec.MediaTypeImageIndex, nil
}
_, configOk := data["config"]
if ud.SchemaVersion == 2 && configOk {
return ocispec.MediaTypeImageManifest, nil
}
return "", errors.New("not able to determine media type")
}
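A minimal sketch of the fallback behaviour: a document without an explicit mediaType field is classified by its top-level keys. Illustrative only, with the same assumed import path as above.

package main

import (
	"fmt"

	"github.com/spegel-org/spegel/pkg/oci"
)

func main() {
	// An image index that omits the optional mediaType field.
	idx := []byte(`{"schemaVersion": 2, "manifests": []}`)
	mt, err := oci.DetermineMediaType(idx)
	if err != nil {
		panic(err)
	}
	fmt.Println(mt) // application/vnd.oci.image.index.v1+json
}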
func WalkImage(ctx context.Context, store Store, img Image) ([]digest.Digest, error) {
dgsts := []digest.Digest{}
err := walk(ctx, []digest.Digest{img.Digest}, func(dgst digest.Digest) ([]digest.Digest, error) {
b, mt, err := store.GetManifest(ctx, dgst)
if err != nil {
return nil, err
}
dgsts = append(dgsts, dgst)
switch mt {
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
var idx ocispec.Index
if err := json.Unmarshal(b, &idx); err != nil {
return nil, err
}
manifestDgsts := []digest.Digest{}
for _, m := range idx.Manifests {
_, err := store.Size(ctx, m.Digest)
if errors.Is(err, ErrNotFound) {
continue
}
if err != nil {
return nil, err
}
manifestDgsts = append(manifestDgsts, m.Digest)
}
if len(manifestDgsts) == 0 {
return nil, fmt.Errorf("could not find any platforms with local content in manifest %s", dgst)
}
return manifestDgsts, nil
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
var manifest ocispec.Manifest
err := json.Unmarshal(b, &manifest)
if err != nil {
return nil, err
}
dgsts = append(dgsts, manifest.Config.Digest)
for _, layer := range manifest.Layers {
dgsts = append(dgsts, layer.Digest)
}
return nil, nil
default:
return nil, fmt.Errorf("unexpected media type %s for digest %s", mt, dgst)
}
})
if err != nil {
return nil, fmt.Errorf("failed to walk image manifests: %w", err)
}
if len(dgsts) == 0 {
return nil, errors.New("no image digests found")
}
return dgsts, nil
}
func walk(ctx context.Context, dgsts []digest.Digest, handler func(dgst digest.Digest) ([]digest.Digest, error)) error {
for _, dgst := range dgsts {
children, err := handler(dgst)
if err != nil {
return err
}
if len(children) == 0 {
continue
}
err = walk(ctx, children, handler)
if err != nil {
return err
}
}
return nil
type Client interface {
Name() string
Verify(ctx context.Context) error
Subscribe(ctx context.Context) (<-chan ImageEvent, <-chan error)
ListImages(ctx context.Context) ([]Image, error)
AllIdentifiers(ctx context.Context, img Image) ([]string, error)
Resolve(ctx context.Context, ref string) (digest.Digest, error)
Size(ctx context.Context, dgst digest.Digest) (int64, error)
GetManifest(ctx context.Context, dgst digest.Digest) ([]byte, string, error)
GetBlob(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error)
// Deprecated: Use GetBlob.
CopyLayer(ctx context.Context, dgst digest.Digest, dst io.Writer) error
}
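To make the Store and WalkImage interplay above concrete, a hedged sketch that walks a single-platform image held in the in-memory store. The manifest content and digests are illustrative only, and the four-argument NewImage shown earlier in this diff is assumed.

package main

import (
	"context"
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/spegel-org/spegel/pkg/oci"
)

func main() {
	ctx := context.Background()
	store := oci.NewMemory()

	// A minimal image manifest referencing one config and one layer.
	manifestJSON := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.oci.image.manifest.v1+json",
	  "config": {
	    "mediaType": "application/vnd.oci.image.config.v1+json",
	    "digest": "sha256:68b8a989a3e08ddbdb3a0077d35c0d0e59c9ecf23d0634584def8bdbb7d6824f",
	    "size": 529
	  },
	  "layers": [
	    {
	      "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
	      "digest": "sha256:3caa2469de2a23cbcc209dd0b9d01cd78ff9a0f88741655991d36baede5b0996",
	      "size": 118
	    }
	  ]
	}`)
	dgst := digest.FromBytes(manifestJSON)
	store.AddBlob(manifestJSON, dgst)

	img, err := oci.NewImage("example.com", "org/app", "v1", dgst)
	if err != nil {
		panic(err)
	}

	// WalkImage returns the manifest digest followed by the config and layer digests.
	dgsts, err := oci.WalkImage(ctx, store, img)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(dgsts)) // 3
}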


@ -1,29 +1,27 @@
package oci
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path"
"path/filepath"
"testing"
"github.com/containerd/containerd/v2/client"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/core/images"
"github.com/containerd/containerd/v2/core/metadata"
"github.com/containerd/containerd/v2/pkg/namespaces"
"github.com/containerd/containerd/v2/plugins/content/local"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/require"
bolt "go.etcd.io/bbolt"
)
func TestStore(t *testing.T) {
t.Parallel()
func TestOCIClient(t *testing.T) {
b, err := os.ReadFile("./testdata/images.json")
require.NoError(t, err)
imgs := []map[string]string{}
@ -46,11 +44,11 @@ func TestStore(t *testing.T) {
contentPath := t.TempDir()
contentStore, err := local.NewStore(contentPath)
require.NoError(t, err)
boltDB, err := bolt.Open(path.Join(t.TempDir(), "bolt.db"), 0o644, nil)
boltDB, err := bolt.Open(path.Join(t.TempDir(), "bolt.db"), 0644, nil)
require.NoError(t, err)
db := metadata.NewDB(boltDB, contentStore, nil)
imageStore := metadata.NewImageStore(db)
ctx := namespaces.WithNamespace(t.Context(), "k8s.io")
ctx := namespaces.WithNamespace(context.TODO(), "k8s.io")
for _, img := range imgs {
dgst, err := digest.Parse(img["digest"])
require.NoError(t, err)
@ -74,7 +72,7 @@ func TestStore(t *testing.T) {
require.NoError(t, err)
writer.Close()
}
containerdClient, err := client.New("", client.WithServices(client.WithImageStore(imageStore), client.WithContentStore(contentStore)))
containerdClient, err := containerd.New("", containerd.WithServices(containerd.WithImageStore(imageStore), containerd.WithContentStore(contentStore)))
require.NoError(t, err)
remoteContainerd := &Containerd{
client: containerdClient,
@ -84,55 +82,25 @@ func TestStore(t *testing.T) {
client: containerdClient,
}
memoryClient := NewMemory()
for _, img := range imgs {
dgst, err := digest.Parse(img["digest"])
require.NoError(t, err)
img, err := ParseImageRequireDigest(img["name"], dgst)
require.NoError(t, err)
memoryClient.AddImage(img)
}
for k, v := range blobs {
memoryClient.AddBlob(v, k)
}
for _, ociStore := range []Store{remoteContainerd, localContainerd, memoryClient} {
t.Run(ociStore.Name(), func(t *testing.T) {
t.Parallel()
b, mt, err := ociStore.GetManifest(ctx, digest.FromString("foo"))
require.Empty(t, b)
require.Empty(t, mt)
require.ErrorIs(t, err, ErrNotFound)
rc, err := ociStore.GetBlob(ctx, digest.FromString("foo"))
require.Empty(t, rc)
require.ErrorIs(t, err, ErrNotFound)
size, err := ociStore.Size(ctx, digest.FromString("foo"))
require.Empty(t, size)
require.ErrorIs(t, err, ErrNotFound)
imgs, err := ociStore.ListImages(ctx)
for _, ociClient := range []Client{remoteContainerd, localContainerd} {
t.Run(ociClient.Name(), func(t *testing.T) {
imgs, err := ociClient.ListImages(ctx)
require.NoError(t, err)
require.Len(t, imgs, 5)
for _, img := range imgs {
tagName, ok := img.TagName()
require.True(t, ok)
_, err := ociStore.Resolve(ctx, tagName)
_, err := ociClient.Resolve(ctx, img.Name)
require.NoError(t, err)
}
noPlatformImg := Image{
Registry: "example.com",
Repository: "org/no-platform",
Tag: "test",
}
tagName, ok := noPlatformImg.TagName()
require.True(t, ok)
dgst, err := ociStore.Resolve(ctx, tagName)
noPlatformName := "example.com/org/no-platform:test"
dgst, err := ociClient.Resolve(ctx, noPlatformName)
require.NoError(t, err)
noPlatformImg.Digest = dgst
_, err = WalkImage(ctx, ociStore, noPlatformImg)
require.EqualError(t, err, "failed to walk image manifests: could not find any platforms with local content in manifest sha256:addc990c58744bdf96364fe89bd4aab38b1e824d51c688edb36c75247cd45fa9")
img := Image{
Name: noPlatformName,
Digest: dgst,
}
_, err = ociClient.AllIdentifiers(ctx, img)
require.EqualError(t, err, "failed to walk image manifests: could not find any platforms with local content in manifest list: sha256:addc990c58744bdf96364fe89bd4aab38b1e824d51c688edb36c75247cd45fa9")
contentTests := []struct {
mediaType string
@ -162,18 +130,16 @@ func TestStore(t *testing.T) {
}
for _, tt := range contentTests {
t.Run(tt.mediaType, func(t *testing.T) {
t.Parallel()
size, err := ociStore.Size(ctx, tt.dgst)
size, err := ociClient.Size(ctx, tt.dgst)
require.NoError(t, err)
require.Equal(t, tt.size, size)
if tt.mediaType != ocispec.MediaTypeImageLayer {
b, mediaType, err := ociStore.GetManifest(ctx, tt.dgst)
b, mediaType, err := ociClient.GetManifest(ctx, tt.dgst)
require.NoError(t, err)
require.Equal(t, tt.mediaType, mediaType)
require.Equal(t, blobs[tt.dgst], b)
} else {
rc, err := ociStore.GetBlob(ctx, tt.dgst)
rc, err := ociClient.GetBlob(ctx, tt.dgst)
require.NoError(t, err)
defer rc.Close()
b, err := io.ReadAll(rc)
@ -184,14 +150,14 @@ func TestStore(t *testing.T) {
}
identifiersTests := []struct {
imageName string
imageDigest string
expectedDgsts []digest.Digest
imageName string
imageDigest string
expectedKeys []string
}{
{
imageName: "ghcr.io/spegel-org/spegel:v0.0.8-with-media-type",
imageDigest: "sha256:9506c8e7a2d0a098d43cadfd7ecdc3c91697e8188d3a1245943b669f717747b4",
expectedDgsts: []digest.Digest{
expectedKeys: []string{
"sha256:9506c8e7a2d0a098d43cadfd7ecdc3c91697e8188d3a1245943b669f717747b4",
"sha256:44cb2cf712c060f69df7310e99339c1eb51a085446f1bb6d44469acff35b4355",
"sha256:d715ba0d85ee7d37da627d0679652680ed2cb23dde6120f25143a0b8079ee47e",
@ -237,7 +203,7 @@ func TestStore(t *testing.T) {
{
imageName: "ghcr.io/spegel-org/spegel:v0.0.8-without-media-type",
imageDigest: "sha256:d8df04365d06181f037251de953aca85cc16457581a8fc168f4957c978e1008b",
expectedDgsts: []digest.Digest{
expectedKeys: []string{
"sha256:d8df04365d06181f037251de953aca85cc16457581a8fc168f4957c978e1008b",
"sha256:44cb2cf712c060f69df7310e99339c1eb51a085446f1bb6d44469acff35b4355",
"sha256:d715ba0d85ee7d37da627d0679652680ed2cb23dde6120f25143a0b8079ee47e",
@ -283,66 +249,13 @@ func TestStore(t *testing.T) {
}
for _, tt := range identifiersTests {
t.Run(tt.imageName, func(t *testing.T) {
t.Parallel()
img, err := ParseImageRequireDigest(tt.imageName, digest.Digest(tt.imageDigest))
img, err := Parse(tt.imageName, digest.Digest(tt.imageDigest))
require.NoError(t, err)
dgsts, err := WalkImage(ctx, ociStore, img)
keys, err := ociClient.AllIdentifiers(ctx, img)
require.NoError(t, err)
require.Equal(t, tt.expectedDgsts, dgsts)
require.Equal(t, tt.expectedKeys, keys)
})
}
})
}
}
func TestDetermineMediaType(t *testing.T) {
t.Parallel()
tests := []struct {
name string
dgst digest.Digest
expectedMediaType string
}{
{
name: "image config",
dgst: digest.Digest("sha256:68b8a989a3e08ddbdb3a0077d35c0d0e59c9ecf23d0634584def8bdbb7d6824f"),
expectedMediaType: ocispec.MediaTypeImageConfig,
},
{
name: "image index",
dgst: digest.Digest("sha256:9430beb291fa7b96997711fc486bc46133c719631aefdbeebe58dd3489217bfe"),
expectedMediaType: ocispec.MediaTypeImageIndex,
},
{
name: "image index without media type",
dgst: digest.Digest("sha256:d8df04365d06181f037251de953aca85cc16457581a8fc168f4957c978e1008b"),
expectedMediaType: ocispec.MediaTypeImageIndex,
},
{
name: "image manifest",
dgst: digest.Digest("sha256:dce623533c59af554b85f859e91fc1cbb7f574e873c82f36b9ea05a09feb0b53"),
expectedMediaType: ocispec.MediaTypeImageManifest,
},
{
name: "image manifest without media type",
dgst: digest.Digest("sha256:b6d6089ca6c395fd563c2084f5dd7bc56a2f5e6a81413558c5be0083287a77e9"),
expectedMediaType: ocispec.MediaTypeImageManifest,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
b, err := os.ReadFile(filepath.Join("testdata", "blobs", tt.dgst.Algorithm().String(), tt.dgst.Encoded()))
require.NoError(t, err)
mt, err := DetermineMediaType(b)
require.NoError(t, err)
require.Equal(t, tt.expectedMediaType, mt)
})
}
mt, err := DetermineMediaType([]byte("{}"))
require.EqualError(t, err, "not able to determine media type")
require.Empty(t, mt)
}


@ -1,15 +0,0 @@
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"digest": "sha256:68b8a989a3e08ddbdb3a0077d35c0d0e59c9ecf23d0634584def8bdbb7d6824f",
"size": 529
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"digest": "sha256:3caa2469de2a23cbcc209dd0b9d01cd78ff9a0f88741655991d36baede5b0996",
"size": 118
}
]
}


@ -0,0 +1,48 @@
package registry
import (
"fmt"
"regexp"
"github.com/opencontainers/go-digest"
)
type referenceType string
const (
referenceTypeManifest = "Manifest"
referenceTypeBlob = "Blob"
)
// Package is used to parse components from requests which conform with the OCI distribution spec.
// https://github.com/opencontainers/distribution-spec/blob/main/spec.md
// /v2/<name>/manifests/<reference>
// /v2/<name>/blobs/<reference>
var (
nameRegex = regexp.MustCompile(`([a-z0-9]+([._-][a-z0-9]+)*(/[a-z0-9]+([._-][a-z0-9]+)*)*)`)
tagRegex = regexp.MustCompile(`([a-zA-Z0-9_][a-zA-Z0-9._-]{0,127})`)
manifestRegexTag = regexp.MustCompile(`/v2/` + nameRegex.String() + `/manifests/` + tagRegex.String() + `$`)
manifestRegexDigest = regexp.MustCompile(`/v2/` + nameRegex.String() + `/manifests/(.*)`)
blobsRegexDigest = regexp.MustCompile(`/v2/` + nameRegex.String() + `/blobs/(.*)`)
)
func parsePathComponents(registry, path string) (string, digest.Digest, referenceType, error) {
comps := manifestRegexTag.FindStringSubmatch(path)
if len(comps) == 6 {
if registry == "" {
return "", "", "", fmt.Errorf("registry parameter needs to be set for tag references")
}
ref := fmt.Sprintf("%s/%s:%s", registry, comps[1], comps[5])
return ref, "", referenceTypeManifest, nil
}
comps = manifestRegexDigest.FindStringSubmatch(path)
if len(comps) == 6 {
return "", digest.Digest(comps[5]), referenceTypeManifest, nil
}
comps = blobsRegexDigest.FindStringSubmatch(path)
if len(comps) == 6 {
return "", digest.Digest(comps[5]), referenceTypeBlob, nil
}
return "", "", "", fmt.Errorf("distribution path could not be parsed")
}


@ -0,0 +1,55 @@
package registry
import (
"testing"
"github.com/opencontainers/go-digest"
"github.com/stretchr/testify/require"
)
func TestParsePathComponents(t *testing.T) {
tests := []struct {
name string
registry string
path string
expectedRef string
expectedDgst digest.Digest
expectedRefType referenceType
}{
{
name: "valid manifest tag",
registry: "example.com",
path: "/v2/foo/bar/manifests/hello-world",
expectedRef: "example.com/foo/bar:hello-world",
expectedDgst: "",
expectedRefType: referenceTypeManifest,
},
{
name: "valid blob digest",
registry: "docker.io",
path: "/v2/library/nginx/blobs/sha256:295c7be079025306c4f1d65997fcf7adb411c88f139ad1d34b537164aa060369",
expectedRef: "",
expectedDgst: digest.Digest("sha256:295c7be079025306c4f1d65997fcf7adb411c88f139ad1d34b537164aa060369"),
expectedRefType: referenceTypeBlob,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ref, dgst, refType, err := parsePathComponents(tt.registry, tt.path)
require.NoError(t, err)
require.Equal(t, tt.expectedRef, ref)
require.Equal(t, tt.expectedDgst, dgst)
require.Equal(t, tt.expectedRefType, refType)
})
}
}
func TestParsePathComponentsInvalidPath(t *testing.T) {
_, _, _, err := parsePathComponents("example.com", "/v2/spegel-org/spegel/v0.0.1")
require.EqualError(t, err, "distribution path could not be parsed")
}
func TestParsePathComponentsMissingRegistry(t *testing.T) {
_, _, _, err := parsePathComponents("", "/v2/spegel-org/spegel/manifests/v0.0.1")
require.EqualError(t, err, "registry parameter needs to be set for tag references")
}


@ -2,169 +2,157 @@ package registry
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/netip"
"net/url"
"path"
"strconv"
"sync"
"strings"
"time"
"github.com/go-logr/logr"
"github.com/opencontainers/go-digest"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/spegel-org/spegel/internal/mux"
"github.com/spegel-org/spegel/pkg/metrics"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/pkg/routing"
"github.com/spegel-org/spegel/pkg/throttle"
)
const (
HeaderSpegelMirrored = "X-Spegel-Mirrored"
MirroredHeaderKey = "X-Spegel-Mirrored"
)
type RegistryConfig struct {
Client *http.Client
Log logr.Logger
Username string
Password string
ResolveRetries int
ResolveLatestTag bool
ResolveTimeout time.Duration
}
func (cfg *RegistryConfig) Apply(opts ...RegistryOption) error {
for _, opt := range opts {
if opt == nil {
continue
}
if err := opt(cfg); err != nil {
return err
}
}
return nil
}
type RegistryOption func(cfg *RegistryConfig) error
func WithResolveRetries(resolveRetries int) RegistryOption {
return func(cfg *RegistryConfig) error {
cfg.ResolveRetries = resolveRetries
return nil
}
}
func WithResolveLatestTag(resolveLatestTag bool) RegistryOption {
return func(cfg *RegistryConfig) error {
cfg.ResolveLatestTag = resolveLatestTag
return nil
}
}
func WithResolveTimeout(resolveTimeout time.Duration) RegistryOption {
return func(cfg *RegistryConfig) error {
cfg.ResolveTimeout = resolveTimeout
return nil
}
}
func WithTransport(transport http.RoundTripper) RegistryOption {
return func(cfg *RegistryConfig) error {
if cfg.Client == nil {
cfg.Client = &http.Client{}
}
cfg.Client.Transport = transport
return nil
}
}
func WithLogger(log logr.Logger) RegistryOption {
return func(cfg *RegistryConfig) error {
cfg.Log = log
return nil
}
}
func WithBasicAuth(username, password string) RegistryOption {
return func(cfg *RegistryConfig) error {
cfg.Username = username
cfg.Password = password
return nil
}
}
type Registry struct {
client *http.Client
bufferPool *sync.Pool
log logr.Logger
ociStore oci.Store
throttler *throttle.Throttler
ociClient oci.Client
router routing.Router
username string
password string
httpClient *http.Client
localAddr string
resolveRetries int
resolveTimeout time.Duration
resolveLatestTag bool
}
func NewRegistry(ociStore oci.Store, router routing.Router, opts ...RegistryOption) (*Registry, error) {
transport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("default transporn is not of type http.Transport")
}
cfg := RegistryConfig{
Client: &http.Client{
Transport: transport.Clone(),
},
Log: logr.Discard(),
ResolveRetries: 3,
ResolveLatestTag: true,
ResolveTimeout: 20 * time.Millisecond,
}
err := cfg.Apply(opts...)
if err != nil {
return nil, err
}
type Option func(*Registry)
bufferPool := &sync.Pool{
New: func() any {
buf := make([]byte, 32*1024)
return &buf
},
func WithResolveRetries(resolveRetries int) Option {
return func(r *Registry) {
r.resolveRetries = resolveRetries
}
r := &Registry{
ociStore: ociStore,
router: router,
client: cfg.Client,
log: cfg.Log,
resolveRetries: cfg.ResolveRetries,
resolveLatestTag: cfg.ResolveLatestTag,
resolveTimeout: cfg.ResolveTimeout,
username: cfg.Username,
password: cfg.Password,
bufferPool: bufferPool,
}
return r, nil
}
func (r *Registry) Server(addr string) (*http.Server, error) {
m := httpx.NewServeMux(r.log)
m.Handle("GET /healthz", r.readyHandler)
m.Handle("GET /v2/", r.registryHandler)
m.Handle("HEAD /v2/", r.registryHandler)
func WithResolveLatestTag(resolveLatestTag bool) Option {
return func(r *Registry) {
r.resolveLatestTag = resolveLatestTag
}
}
func WithResolveTimeout(resolveTimeout time.Duration) Option {
return func(r *Registry) {
r.resolveTimeout = resolveTimeout
}
}
func WithTransport(transport http.RoundTripper) Option {
return func(r *Registry) {
r.httpClient.Transport = transport
}
}
func WithLocalAddress(localAddr string) Option {
return func(r *Registry) {
r.localAddr = localAddr
}
}
func WithBlobSpeed(blobSpeed throttle.Byterate) Option {
return func(r *Registry) {
r.throttler = throttle.NewThrottler(blobSpeed)
}
}
func WithLogger(log logr.Logger) Option {
return func(r *Registry) {
r.log = log
}
}
func NewRegistry(ociClient oci.Client, router routing.Router, opts ...Option) *Registry {
r := &Registry{
ociClient: ociClient,
router: router,
httpClient: &http.Client{},
resolveRetries: 3,
resolveTimeout: 1 * time.Second,
resolveLatestTag: true,
}
for _, opt := range opts {
opt(r)
}
return r
}
func (r *Registry) Server(addr string) *http.Server {
srv := &http.Server{
Addr: addr,
Handler: m,
Handler: mux.NewServeMux(r.handle),
}
return srv, nil
return srv
}
func (r *Registry) readyHandler(rw httpx.ResponseWriter, req *http.Request) {
rw.SetHandler("ready")
ok, err := r.router.Ready(req.Context())
func (r *Registry) handle(rw mux.ResponseWriter, req *http.Request) {
start := time.Now()
handler := req.URL.Path
if strings.HasPrefix(handler, "/v2") {
handler = "/v2/*"
}
defer func() {
latency := time.Since(start)
statusCode := strconv.FormatInt(int64(rw.Status()), 10)
metrics.HttpRequestsInflight.WithLabelValues(handler).Add(-1)
metrics.HttpRequestDurHistogram.WithLabelValues(handler, req.Method, statusCode).Observe(latency.Seconds())
metrics.HttpResponseSizeHistogram.WithLabelValues(handler, req.Method, statusCode).Observe(float64(rw.Size()))
// Ignore logging requests to healthz to reduce log noise
if req.URL.Path == "/healthz" {
return
}
// Logging
ip := getClientIP(req)
path := req.URL.Path
kvs := []interface{}{"path", path, "status", rw.Status(), "method", req.Method, "latency", latency, "ip", ip}
if rw.Status() >= 200 && rw.Status() < 300 {
r.log.Info("", kvs...)
return
}
r.log.Error(rw.Error(), "", kvs...)
}()
metrics.HttpRequestsInflight.WithLabelValues(handler).Add(1)
if req.URL.Path == "/healthz" && req.Method == http.MethodGet {
r.readyHandler(rw, req)
return
}
if strings.HasPrefix(req.URL.Path, "/v2") && (req.Method == http.MethodGet || req.Method == http.MethodHead) {
r.registryHandler(rw, req)
return
}
rw.WriteHeader(http.StatusNotFound)
}
func (r *Registry) readyHandler(rw mux.ResponseWriter, req *http.Request) {
ok, err := r.router.Ready()
if err != nil {
rw.WriteError(http.StatusInternalServerError, fmt.Errorf("could not determine router readiness: %w", err))
rw.WriteError(http.StatusInternalServerError, err)
return
}
if !ok {
@ -173,213 +161,205 @@ func (r *Registry) readyHandler(rw httpx.ResponseWriter, req *http.Request) {
}
}
func (r *Registry) registryHandler(rw httpx.ResponseWriter, req *http.Request) {
rw.SetHandler("registry")
// Check basic authentication
if r.username != "" || r.password != "" {
username, password, _ := req.BasicAuth()
if r.username != username || r.password != password {
rw.WriteError(http.StatusUnauthorized, errors.New("invalid basic authentication"))
return
}
}
func (r *Registry) registryHandler(rw mux.ResponseWriter, req *http.Request) {
// Quickly return 200 for /v2 to indicate that registry supports v2.
if path.Clean(req.URL.Path) == "/v2" {
rw.SetHandler("v2")
rw.WriteHeader(http.StatusOK)
return
}
// Parse out path components from request.
dist, err := oci.ParseDistributionPath(req.URL)
registryName := req.URL.Query().Get("ns")
ref, dgst, refType, err := parsePathComponents(registryName, req.URL.Path)
if err != nil {
rw.WriteError(http.StatusNotFound, fmt.Errorf("could not parse path according to OCI distribution spec: %w", err))
rw.WriteError(http.StatusNotFound, err)
return
}
// Request with mirror header are proxied.
if req.Header.Get(HeaderSpegelMirrored) != "true" {
// Set mirrored header in request to stop infinite loops
req.Header.Set(HeaderSpegelMirrored, "true")
// If content is present locally we should skip the mirroring and just serve it.
var ociErr error
if dist.Digest == "" {
_, ociErr = r.ociStore.Resolve(req.Context(), dist.Reference())
} else {
_, ociErr = r.ociStore.Size(req.Context(), dist.Digest)
}
if ociErr != nil {
rw.SetHandler("mirror")
r.handleMirror(rw, req, dist)
// Check if latest tag should be resolved
if !r.resolveLatestTag && ref != "" {
_, tag, _ := strings.Cut(ref, ":")
if tag == "latest" {
rw.WriteHeader(http.StatusNotFound)
return
}
}
// Serve registry endpoints.
switch dist.Kind {
case oci.DistributionKindManifest:
rw.SetHandler("manifest")
r.handleManifest(rw, req, dist)
return
case oci.DistributionKindBlob:
rw.SetHandler("blob")
r.handleBlob(rw, req, dist)
return
default:
rw.WriteError(http.StatusNotFound, fmt.Errorf("unknown distribution path kind %s", dist.Kind))
return
}
}
func (r *Registry) handleMirror(rw httpx.ResponseWriter, req *http.Request, dist oci.DistributionPath) {
log := r.log.WithValues("ref", dist.Reference(), "path", req.URL.Path)
defer func() {
// Requests without mirror header set will be mirrored
if req.Header.Get(MirroredHeaderKey) != "true" {
key := dgst.String()
if key == "" {
key = ref
}
r.handleMirror(rw, req, key)
sourceType := "internal"
if r.isExternalRequest(req) {
sourceType = "external"
}
cacheType := "hit"
if rw.Status() != http.StatusOK {
cacheType = "miss"
}
metrics.MirrorRequestsTotal.WithLabelValues(dist.Registry, cacheType).Inc()
}()
if !r.resolveLatestTag && dist.IsLatestTag() {
r.log.V(4).Info("skipping mirror request for image with latest tag", "image", dist.Reference())
rw.WriteHeader(http.StatusNotFound)
metrics.MirrorRequestsTotal.WithLabelValues(registryName, cacheType, sourceType).Inc()
return
}
// Resolve mirror with the requested reference
// Serve registry endpoints.
if dgst == "" {
dgst, err = r.ociClient.Resolve(req.Context(), ref)
if err != nil {
rw.WriteError(http.StatusNotFound, err)
return
}
}
switch refType {
case referenceTypeManifest:
r.handleManifest(rw, req, dgst)
case referenceTypeBlob:
r.handleBlob(rw, req, dgst)
default:
// If nothing matches return 404.
rw.WriteHeader(http.StatusNotFound)
}
}
func (r *Registry) handleMirror(rw mux.ResponseWriter, req *http.Request, key string) {
log := r.log.WithValues("key", key, "path", req.URL.Path, "ip", req.RemoteAddr)
// Resolve mirror with the requested key
resolveCtx, cancel := context.WithTimeout(req.Context(), r.resolveTimeout)
defer cancel()
resolveCtx = logr.NewContext(resolveCtx, log)
peerCh, err := r.router.Resolve(resolveCtx, dist.Reference(), r.resolveRetries)
isExternal := r.isExternalRequest(req)
if isExternal {
log.Info("handling mirror request from external node")
}
peerCh, err := r.router.Resolve(resolveCtx, key, isExternal, r.resolveRetries)
if err != nil {
rw.WriteError(http.StatusInternalServerError, fmt.Errorf("error occurred when attempting to resolve mirrors: %w", err))
rw.WriteError(http.StatusInternalServerError, err)
return
}
mirrorAttempts := 0
// TODO: Refactor context cancel and mirror channel closing
for {
select {
case <-req.Context().Done():
case <-resolveCtx.Done():
// Request has been closed by server or client. No use continuing.
rw.WriteError(http.StatusNotFound, fmt.Errorf("mirroring for image component %s has been cancelled: %w", dist.Reference(), resolveCtx.Err()))
rw.WriteError(http.StatusNotFound, fmt.Errorf("request closed for key: %s", key))
return
case peer, ok := <-peerCh:
case ipAddr, ok := <-peerCh:
// Channel closed means no more mirrors will be received and max retries has been reached.
if !ok {
err = fmt.Errorf("mirror with image component %s could not be found", dist.Reference())
if mirrorAttempts > 0 {
err = errors.Join(err, fmt.Errorf("requests to %d mirrors failed, all attempts have been exhausted or timeout has been reached", mirrorAttempts))
}
rw.WriteError(http.StatusNotFound, err)
rw.WriteError(http.StatusNotFound, fmt.Errorf("mirror resolve retries exhausted for key: %s", key))
return
}
mirrorAttempts++
err := forwardRequest(r.client, r.bufferPool, req, rw, peer)
if err != nil {
log.Error(err, "request to mirror failed", "attempt", mirrorAttempts, "path", req.URL.Path, "mirror", peer)
continue
scheme := "http"
if req.TLS != nil {
scheme = "https"
}
log.V(4).Info("mirrored request", "path", req.URL.Path, "mirror", peer)
u := url.URL{
Scheme: scheme,
Host: ipAddr.String(),
Path: req.URL.Path,
// TODO: Should this error early if not set?
RawQuery: fmt.Sprintf("ns=%s", req.URL.Query().Get("ns")),
}
forwardReq, err := http.NewRequestWithContext(req.Context(), req.Method, u.String(), nil)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
forwardReq.Header.Add(MirroredHeaderKey, "true")
resp, err := r.httpClient.Do(forwardReq)
if err != nil {
log.Error(err, "mirror failed attempting next")
break
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Error(fmt.Errorf("expected mirror to respond with 200 OK but received: %s", resp.Status), "mirror failed attempting next")
break
}
for k, v := range resp.Header {
for _, vv := range v {
rw.Header().Add(k, vv)
}
}
_, err = io.Copy(rw, resp.Body)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
log.V(5).Info("mirrored request", "url", u.String())
return
}
}
}
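A rough, self-contained sketch of the loop-prevention idea used above: a node only attempts to mirror a request when the mirrored header is absent, and sets it before forwarding, so a peer that also misses the content cannot bounce the request around indefinitely. The handler body and port are made up for illustration.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

const mirroredHeader = "X-Spegel-Mirrored"

func main() {
	// A peer that never has the content locally.
	peer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get(mirroredHeader) == "true" {
			// The request was already mirrored once; fail fast instead of forwarding again.
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		http.Error(w, "would forward to another peer here", http.StatusNotFound)
	}))
	defer peer.Close()

	req, err := http.NewRequest(http.MethodGet, peer.URL+"/v2/foo/blobs/sha256:abc", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set(mirroredHeader, "true") // set before forwarding to stop loops
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 404, the peer did not try to mirror again
}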
func (r *Registry) handleManifest(rw httpx.ResponseWriter, req *http.Request, dist oci.DistributionPath) {
if dist.Digest == "" {
dgst, err := r.ociStore.Resolve(req.Context(), dist.Reference())
if err != nil {
rw.WriteError(http.StatusNotFound, fmt.Errorf("could not get digest for image %s: %w", dist.Reference(), err))
return
}
dist.Digest = dgst
}
b, mediaType, err := r.ociStore.GetManifest(req.Context(), dist.Digest)
func (r *Registry) handleManifest(rw mux.ResponseWriter, req *http.Request, dgst digest.Digest) {
b, mediaType, err := r.ociClient.GetManifest(req.Context(), dgst)
if err != nil {
rw.WriteError(http.StatusNotFound, fmt.Errorf("could not get manifest content for digest %s: %w", dist.Digest.String(), err))
rw.WriteError(http.StatusNotFound, err)
return
}
rw.Header().Set(httpx.HeaderContentType, mediaType)
rw.Header().Set(httpx.HeaderContentLength, strconv.FormatInt(int64(len(b)), 10))
rw.Header().Set(oci.HeaderDockerDigest, dist.Digest.String())
rw.Header().Set("Content-Type", mediaType)
rw.Header().Set("Content-Length", strconv.FormatInt(int64(len(b)), 10))
rw.Header().Set("Docker-Content-Digest", dgst.String())
if req.Method == http.MethodHead {
return
}
_, err = rw.Write(b)
if err != nil {
r.log.Error(err, "error occurred when writing manifest")
rw.WriteError(http.StatusNotFound, err)
return
}
}
func (r *Registry) handleBlob(rw httpx.ResponseWriter, req *http.Request, dist oci.DistributionPath) {
size, err := r.ociStore.Size(req.Context(), dist.Digest)
func (r *Registry) handleBlob(rw mux.ResponseWriter, req *http.Request, dgst digest.Digest) {
size, err := r.ociClient.Size(req.Context(), dgst)
if err != nil {
rw.WriteError(http.StatusInternalServerError, fmt.Errorf("could not determine size of blob with digest %s: %w", dist.Digest.String(), err))
rw.WriteError(http.StatusInternalServerError, err)
return
}
rw.Header().Set(httpx.HeaderAcceptRanges, "bytes")
rw.Header().Set(httpx.HeaderContentType, "application/octet-stream")
rw.Header().Set(httpx.HeaderContentLength, strconv.FormatInt(size, 10))
rw.Header().Set(oci.HeaderDockerDigest, dist.Digest.String())
rw.Header().Set("Content-Length", strconv.FormatInt(size, 10))
rw.Header().Set("Docker-Content-Digest", dgst.String())
if req.Method == http.MethodHead {
return
}
rc, err := r.ociStore.GetBlob(req.Context(), dist.Digest)
var w io.Writer = rw
if r.throttler != nil {
w = r.throttler.Writer(rw)
}
rc, err := r.ociClient.GetBlob(req.Context(), dgst)
if err != nil {
rw.WriteError(http.StatusInternalServerError, fmt.Errorf("could not get reader for blob with digest %s: %w", dist.Digest.String(), err))
rw.WriteError(http.StatusInternalServerError, err)
return
}
defer rc.Close()
http.ServeContent(rw, req, "", time.Time{}, rc)
_, err = io.Copy(w, rc)
if err != nil {
rw.WriteError(http.StatusInternalServerError, err)
return
}
}
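As a side note on the two blob-serving variants above: one path streams with io.Copy through an optional throttled writer, while the other hands the reader to http.ServeContent, which requires an io.ReadSeeker but gives Range and HEAD handling for free. A small stdlib-only sketch of that trade-off, with a made-up payload and test server:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	blob := []byte("hello world")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ServeContent needs an io.ReadSeeker and handles Range and HEAD requests.
		http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(blob))
	}))
	defer srv.Close()

	req, err := http.NewRequest(http.MethodGet, srv.URL, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-4")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(b)) // 206 hello
}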
func forwardRequest(client *http.Client, bufferPool *sync.Pool, req *http.Request, rw http.ResponseWriter, addrPort netip.AddrPort) error {
// Do request to mirror.
forwardScheme := "http"
if req.TLS != nil {
forwardScheme = "https"
}
u := &url.URL{
Scheme: forwardScheme,
Host: addrPort.String(),
Path: req.URL.Path,
RawQuery: req.URL.RawQuery,
}
forwardReq, err := http.NewRequestWithContext(req.Context(), req.Method, u.String(), nil)
if err != nil {
return err
}
httpx.CopyHeader(forwardReq.Header, req.Header)
forwardResp, err := client.Do(forwardReq)
if err != nil {
return err
}
defer httpx.DrainAndClose(forwardResp.Body)
err = httpx.CheckResponseStatus(forwardResp, http.StatusOK, http.StatusPartialContent)
if err != nil {
return err
}
// TODO (phillebaba): Is it possible to retry if copy fails half way through?
// Copy forward response to response writer.
httpx.CopyHeader(rw.Header(), forwardResp.Header)
rw.WriteHeader(http.StatusOK)
//nolint: errcheck // Ignore
buf := bufferPool.Get().(*[]byte)
defer bufferPool.Put(buf)
_, err = io.CopyBuffer(rw, forwardResp.Body, *buf)
if err != nil {
return err
}
return nil
func (r *Registry) isExternalRequest(req *http.Request) bool {
return req.Host != r.localAddr
}
func getClientIP(req *http.Request) string {
forwardedFor := req.Header.Get("X-Forwarded-For")
if forwardedFor != "" {
comps := strings.Split(forwardedFor, ",")
if len(comps) > 1 {
return comps[0]
}
return forwardedFor
}
h, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return ""
}
return h
}

View File

@ -7,141 +7,14 @@ import (
"net/http/httptest"
"net/netip"
"testing"
"time"
"github.com/go-logr/logr"
"github.com/stretchr/testify/require"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/internal/mux"
"github.com/spegel-org/spegel/pkg/routing"
)
func TestRegistryOptions(t *testing.T) {
t.Parallel()
transport := &http.Transport{}
log := logr.Discard()
opts := []RegistryOption{
WithResolveRetries(5),
WithResolveLatestTag(true),
WithResolveTimeout(10 * time.Minute),
WithTransport(transport),
WithLogger(log),
WithBasicAuth("foo", "bar"),
}
cfg := RegistryConfig{}
err := cfg.Apply(opts...)
require.NoError(t, err)
require.Equal(t, 5, cfg.ResolveRetries)
require.True(t, cfg.ResolveLatestTag)
require.Equal(t, 10*time.Minute, cfg.ResolveTimeout)
require.Equal(t, transport, cfg.Client.Transport)
require.Equal(t, log, cfg.Log)
require.Equal(t, "foo", cfg.Username)
require.Equal(t, "bar", cfg.Password)
}
func TestReadyHandler(t *testing.T) {
t.Parallel()
router := routing.NewMemoryRouter(map[string][]netip.AddrPort{}, netip.MustParseAddrPort("127.0.0.1:8080"))
reg, err := NewRegistry(nil, router)
require.NoError(t, err)
srv, err := reg.Server("")
require.NoError(t, err)
rw := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "http://localhost/healthz", nil)
srv.Handler.ServeHTTP(rw, req)
require.Equal(t, http.StatusInternalServerError, rw.Result().StatusCode)
router.Add("foo", netip.MustParseAddrPort("127.0.0.1:9090"))
rw = httptest.NewRecorder()
req = httptest.NewRequest(http.MethodGet, "http://localhost/healthz", nil)
srv.Handler.ServeHTTP(rw, req)
require.Equal(t, http.StatusOK, rw.Result().StatusCode)
}
func TestBasicAuth(t *testing.T) {
t.Parallel()
tests := []struct {
name string
username string
password string
reqUsername string
reqPassword string
expected int
}{
{
name: "no registry authentication",
expected: http.StatusOK,
},
{
name: "unnecessary authentication",
reqUsername: "foo",
reqPassword: "bar",
expected: http.StatusOK,
},
{
name: "correct authentication",
username: "foo",
password: "bar",
reqUsername: "foo",
reqPassword: "bar",
expected: http.StatusOK,
},
{
name: "invalid username",
username: "foo",
password: "bar",
reqUsername: "wrong",
reqPassword: "bar",
expected: http.StatusUnauthorized,
},
{
name: "invalid password",
username: "foo",
password: "bar",
reqUsername: "foo",
reqPassword: "wrong",
expected: http.StatusUnauthorized,
},
{
name: "missing authentication",
username: "foo",
password: "bar",
expected: http.StatusUnauthorized,
},
{
name: "missing authentication",
username: "foo",
password: "bar",
expected: http.StatusUnauthorized,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
reg, err := NewRegistry(nil, nil, WithBasicAuth(tt.username, tt.password))
require.NoError(t, err)
rw := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "http://localhost/v2/", nil)
req.SetBasicAuth(tt.reqUsername, tt.reqPassword)
srv, err := reg.Server("")
require.NoError(t, err)
srv.Handler.ServeHTTP(rw, req)
require.Equal(t, tt.expected, rw.Result().StatusCode)
})
}
}
func TestMirrorHandler(t *testing.T) {
t.Parallel()
badSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Header().Set("foo", "bar")
@ -150,9 +23,7 @@ func TestMirrorHandler(t *testing.T) {
w.Write([]byte("hello world"))
}
}))
t.Cleanup(func() {
badSvr.Close()
})
defer badSvr.Close()
badAddrPort := netip.MustParseAddrPort(badSvr.Listener.Addr().String())
goodSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("foo", "bar")
@ -161,25 +32,18 @@ func TestMirrorHandler(t *testing.T) {
w.Write([]byte("hello world"))
}
}))
t.Cleanup(func() {
goodSvr.Close()
})
defer goodSvr.Close()
goodAddrPort := netip.MustParseAddrPort(goodSvr.Listener.Addr().String())
unreachableAddrPort := netip.MustParseAddrPort("127.0.0.1:0")
resolver := map[string][]netip.AddrPort{
// No working peers
"sha256:c3e30fbcf3b231356a1efbd30a8ccec75134a7a8b45217ede97f4ff483540b04": {badAddrPort, unreachableAddrPort, badAddrPort},
// First Peer
"sha256:3b8a55c543ccc7ae01c47b1d35af5826a6439a9b91ab0ca96de9967759279896": {goodAddrPort, badAddrPort, badAddrPort},
// First peer error
"sha256:a0daab85ec30e2809a38c32fa676515aba22f481c56fda28637ae964ff398e3d": {unreachableAddrPort, goodAddrPort},
// Last peer working
"sha256:11242d2a347bf8ab30b9f92d5ca219bbbedf95df5a8b74631194561497c1fae8": {badAddrPort, badAddrPort, goodAddrPort},
"no-working-peers": {badAddrPort, unreachableAddrPort, badAddrPort},
"first-peer": {goodAddrPort, badAddrPort, badAddrPort},
"first-peer-error": {unreachableAddrPort, goodAddrPort},
"last-peer-working": {badAddrPort, badAddrPort, goodAddrPort},
}
router := routing.NewMemoryRouter(resolver, netip.AddrPort{})
reg, err := NewRegistry(oci.NewMemory(), router)
require.NoError(t, err)
router := routing.NewMockRouter(resolver, netip.AddrPort{})
reg := NewRegistry(nil, router)
tests := []struct {
expectedHeaders map[string][]string
@ -197,28 +61,28 @@ func TestMirrorHandler(t *testing.T) {
},
{
name: "request should not timeout and give 404 if all peers fail",
key: "sha256:c3e30fbcf3b231356a1efbd30a8ccec75134a7a8b45217ede97f4ff483540b04",
key: "no-working-peers",
expectedStatus: http.StatusNotFound,
expectedBody: "",
expectedHeaders: nil,
},
{
name: "request should work when first peer responds",
key: "sha256:3b8a55c543ccc7ae01c47b1d35af5826a6439a9b91ab0ca96de9967759279896",
key: "first-peer",
expectedStatus: http.StatusOK,
expectedBody: "hello world",
expectedHeaders: map[string][]string{"foo": {"bar"}},
},
{
name: "second peer should respond when first gives error",
key: "sha256:a0daab85ec30e2809a38c32fa676515aba22f481c56fda28637ae964ff398e3d",
key: "first-peer-error",
expectedStatus: http.StatusOK,
expectedBody: "hello world",
expectedHeaders: map[string][]string{"foo": {"bar"}},
},
{
name: "last peer should respond when two first fail",
key: "sha256:11242d2a347bf8ab30b9f92d5ca219bbbedf95df5a8b74631194561497c1fae8",
key: "last-peer-working",
expectedStatus: http.StatusOK,
expectedBody: "hello world",
expectedHeaders: map[string][]string{"foo": {"bar"}},
@ -227,17 +91,14 @@ func TestMirrorHandler(t *testing.T) {
for _, tt := range tests {
for _, method := range []string{http.MethodGet, http.MethodHead} {
t.Run(fmt.Sprintf("%s-%s", method, tt.name), func(t *testing.T) {
t.Parallel()
target := fmt.Sprintf("http://example.com/v2/foo/bar/blobs/%s", tt.key)
rw := httptest.NewRecorder()
req := httptest.NewRequest(method, target, nil)
srv, err := reg.Server("")
require.NoError(t, err)
srv.Handler.ServeHTTP(rw, req)
m := mux.NewServeMux(reg.handle)
m.ServeHTTP(rw, req)
resp := rw.Result()
defer httpx.DrainAndClose(resp.Body)
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, tt.expectedStatus, resp.StatusCode)
@ -259,3 +120,43 @@ func TestMirrorHandler(t *testing.T) {
}
}
}
func TestGetClientIP(t *testing.T) {
tests := []struct {
name string
request *http.Request
expected string
}{
{
name: "x forwarded for single",
request: &http.Request{
Header: http.Header{
"X-Forwarded-For": []string{"localhost"},
},
},
expected: "localhost",
},
{
name: "x forwarded for multiple",
request: &http.Request{
Header: http.Header{
"X-Forwarded-For": []string{"localhost,127.0.0.1"},
},
},
expected: "localhost",
},
{
name: "remote address",
request: &http.Request{
RemoteAddr: "127.0.0.1:9090",
},
expected: "127.0.0.1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ip := getClientIP(tt.request)
require.Equal(t, tt.expected, ip)
})
}
}

View File

@ -4,138 +4,116 @@ import (
"context"
"errors"
"io"
"net"
"net/http"
"slices"
"strings"
"sync"
"time"
"golang.org/x/sync/errgroup"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/spegel-org/spegel/pkg/httpx"
"github.com/multiformats/go-multiaddr"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
// Bootstrapper resolves peers to bootstrap with for the P2P router.
type Bootstrapper interface {
// Run starts the bootstrap process. Should be blocking even if not needed.
Run(ctx context.Context, id string) error
// Get returns a list of peers that should be used as bootstrap nodes.
// If the peer ID is empty it will be resolved.
// If the address is missing a port the P2P router port will be used.
Get(ctx context.Context) ([]peer.AddrInfo, error)
Get() (*peer.AddrInfo, error)
}
var _ Bootstrapper = &StaticBootstrapper{}
type StaticBootstrapper struct {
peers []peer.AddrInfo
mx sync.RWMutex
type KubernetesBootstrapper struct {
cs kubernetes.Interface
initCh chan interface{}
leaderElectionNamespace string
leaderElectioName string
id string
mx sync.RWMutex
}
func NewStaticBootstrapperFromStrings(peerStrs []string) (*StaticBootstrapper, error) {
peers := []peer.AddrInfo{}
for _, peerStr := range peerStrs {
peer, err := peer.AddrInfoFromString(peerStr)
if err != nil {
return nil, err
}
peers = append(peers, *peer)
}
return NewStaticBootstrapper(peers), nil
}
func NewStaticBootstrapper(peers []peer.AddrInfo) *StaticBootstrapper {
return &StaticBootstrapper{
peers: peers,
func NewKubernetesBootstrapper(cs kubernetes.Interface, namespace, name string) Bootstrapper {
return &KubernetesBootstrapper{
leaderElectionNamespace: namespace,
leaderElectioName: name,
cs: cs,
initCh: make(chan interface{}),
}
}
func (b *StaticBootstrapper) Run(ctx context.Context, id string) error {
<-ctx.Done()
func (k *KubernetesBootstrapper) Run(ctx context.Context, id string) error {
lockCfg := resourcelock.ResourceLockConfig{
Identity: id,
}
rl, err := resourcelock.New(
resourcelock.LeasesResourceLock,
k.leaderElectionNamespace,
k.leaderElectioName,
k.cs.CoreV1(),
k.cs.CoordinationV1(),
lockCfg,
)
if err != nil {
return err
}
leCfg := leaderelection.LeaderElectionConfig{
Lock: rl,
ReleaseOnCancel: true,
LeaseDuration: 10 * time.Second,
RenewDeadline: 5 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {},
OnStoppedLeading: func() {},
OnNewLeader: func(identity string) {
if identity == resourcelock.UnknownLeader {
return
}
// Close channel if not already closed
select {
case <-k.initCh:
break
default:
close(k.initCh)
}
k.mx.Lock()
defer k.mx.Unlock()
k.id = identity
},
},
}
leaderelection.RunOrDie(ctx, leCfg)
return nil
}
func (b *StaticBootstrapper) Get(ctx context.Context) ([]peer.AddrInfo, error) {
b.mx.RLock()
defer b.mx.RUnlock()
return b.peers, nil
}
func (k *KubernetesBootstrapper) Get() (*peer.AddrInfo, error) {
<-k.initCh
k.mx.RLock()
defer k.mx.RUnlock()
func (b *StaticBootstrapper) SetPeers(peers []peer.AddrInfo) {
b.mx.Lock()
defer b.mx.Unlock()
b.peers = peers
}
var _ Bootstrapper = &DNSBootstrapper{}
type DNSBootstrapper struct {
resolver *net.Resolver
host string
limit int
}
func NewDNSBootstrapper(host string, limit int) *DNSBootstrapper {
return &DNSBootstrapper{
resolver: &net.Resolver{},
host: host,
limit: limit,
}
}
func (b *DNSBootstrapper) Run(ctx context.Context, id string) error {
<-ctx.Done()
return nil
}
func (b *DNSBootstrapper) Get(ctx context.Context) ([]peer.AddrInfo, error) {
ips, err := b.resolver.LookupIPAddr(ctx, b.host)
addr, err := multiaddr.NewMultiaddr(k.id)
if err != nil {
return nil, err
}
if len(ips) == 0 {
addrInfo, err := peer.AddrInfoFromP2pAddr(addr)
if err != nil {
return nil, err
}
slices.SortFunc(ips, func(a, b net.IPAddr) int {
return strings.Compare(a.String(), b.String())
})
addrInfos := []peer.AddrInfo{}
for _, ip := range ips {
addr, err := manet.FromIPAndZone(ip.IP, ip.Zone)
if err != nil {
return nil, err
}
addrInfos = append(addrInfos, peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{addr},
})
}
limit := min(len(addrInfos), b.limit)
return addrInfos[:limit], nil
return addrInfo, err
}
var _ Bootstrapper = &HTTPBootstrapper{}
type HTTPBootstrapper struct {
httpClient *http.Client
addr string
peer string
addr string
peer string
}
func NewHTTPBootstrapper(addr, peer string) *HTTPBootstrapper {
return &HTTPBootstrapper{
httpClient: httpx.BaseClient(),
addr: addr,
peer: peer,
addr: addr,
peer: peer,
}
}
func (bs *HTTPBootstrapper) Run(ctx context.Context, id string) error {
func (h *HTTPBootstrapper) Run(ctx context.Context, id string) error {
g, ctx := errgroup.WithContext(ctx)
mux := http.NewServeMux()
mux.HandleFunc("/id", func(w http.ResponseWriter, r *http.Request) {
@ -144,7 +122,7 @@ func (bs *HTTPBootstrapper) Run(ctx context.Context, id string) error {
w.Write([]byte(id))
})
srv := http.Server{
Addr: bs.addr,
Addr: h.addr,
Handler: mux,
}
g.Go(func() error {
@ -162,25 +140,17 @@ func (bs *HTTPBootstrapper) Run(ctx context.Context, id string) error {
return g.Wait()
}
func (bs *HTTPBootstrapper) Get(ctx context.Context) ([]peer.AddrInfo, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, bs.peer, nil)
if err != nil {
return nil, err
}
resp, err := bs.httpClient.Do(req)
if err != nil {
return nil, err
}
defer httpx.DrainAndClose(resp.Body)
err = httpx.CheckResponseStatus(resp, http.StatusOK)
func (h *HTTPBootstrapper) Get() (*peer.AddrInfo, error) {
resp, err := http.DefaultClient.Get(h.peer)
if err != nil {
return nil, err
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
addr, err := ma.NewMultiaddr(string(b))
addr, err := multiaddr.NewMultiaddr(string(b))
if err != nil {
return nil, err
}
@ -188,5 +158,5 @@ func (bs *HTTPBootstrapper) Get(ctx context.Context) ([]peer.AddrInfo, error) {
if err != nil {
return nil, err
}
return []peer.AddrInfo{*addrInfo}, nil
return addrInfo, err
}

View File

@ -6,46 +6,12 @@ import (
"net/http/httptest"
"testing"
"golang.org/x/sync/errgroup"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
)
func TestStaticBootstrap(t *testing.T) {
t.Parallel()
peers := []peer.AddrInfo{
{
ID: "foo",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1")},
},
{
ID: "bar",
Addrs: []ma.Multiaddr{manet.IP6Loopback},
},
}
bs := NewStaticBootstrapper(peers)
ctx, cancel := context.WithCancel(t.Context())
g, gCtx := errgroup.WithContext(ctx)
g.Go(func() error {
return bs.Run(gCtx, "")
})
bsPeers, err := bs.Get(t.Context())
require.NoError(t, err)
require.ElementsMatch(t, peers, bsPeers)
cancel()
err = g.Wait()
require.NoError(t, err)
}
func TestHTTPBootstrap(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
id := "/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -54,23 +20,12 @@ func TestHTTPBootstrap(t *testing.T) {
}))
defer svr.Close()
bs := NewHTTPBootstrapper(":", svr.URL)
ctx, cancel := context.WithCancel(t.Context())
g, gCtx := errgroup.WithContext(ctx)
g.Go(func() error {
return bs.Run(gCtx, "")
})
addrInfos, err := bs.Get(t.Context())
bootstrapper := NewHTTPBootstrapper(":", svr.URL)
//nolint:errcheck // ignore
go bootstrapper.Run(ctx, id)
addrInfo, err := bootstrapper.Get()
require.NoError(t, err)
require.Len(t, addrInfos, 1)
addrInfo := addrInfos[0]
require.Len(t, addrInfo.Addrs, 1)
require.Equal(t, "/ip4/104.131.131.82/tcp/4001", addrInfo.Addrs[0].String())
require.Equal(t, "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", addrInfo.ID.String())
cancel()
err = g.Wait()
require.NoError(t, err)
}

View File

@ -1,80 +0,0 @@
package routing
import (
"context"
"net/netip"
"slices"
"sync"
)
var _ Router = &MemoryRouter{}
type MemoryRouter struct {
resolver map[string][]netip.AddrPort
self netip.AddrPort
mx sync.RWMutex
}
func NewMemoryRouter(resolver map[string][]netip.AddrPort, self netip.AddrPort) *MemoryRouter {
return &MemoryRouter{
resolver: resolver,
self: self,
}
}
func (m *MemoryRouter) Ready(ctx context.Context) (bool, error) {
m.mx.RLock()
defer m.mx.RUnlock()
return len(m.resolver) > 0, nil
}
func (m *MemoryRouter) Resolve(ctx context.Context, key string, count int) (<-chan netip.AddrPort, error) {
m.mx.RLock()
peers, ok := m.resolver[key]
m.mx.RUnlock()
peerCh := make(chan netip.AddrPort, count)
// If no peers exist close the channel to stop any consumer.
if !ok {
close(peerCh)
return peerCh, nil
}
go func() {
for _, peer := range peers {
peerCh <- peer
}
close(peerCh)
}()
return peerCh, nil
}
func (m *MemoryRouter) Advertise(ctx context.Context, keys []string) error {
for _, key := range keys {
m.Add(key, m.self)
}
return nil
}
func (m *MemoryRouter) Add(key string, ap netip.AddrPort) {
m.mx.Lock()
defer m.mx.Unlock()
v, ok := m.resolver[key]
if !ok {
m.resolver[key] = []netip.AddrPort{ap}
return
}
if slices.Contains(v, ap) {
return
}
m.resolver[key] = append(v, ap)
}
func (m *MemoryRouter) Lookup(key string) ([]netip.AddrPort, bool) {
m.mx.RLock()
defer m.mx.RUnlock()
v, ok := m.resolver[key]
return v, ok
}

View File

@ -1,47 +0,0 @@
package routing
import (
"net/netip"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestMemoryRouter(t *testing.T) {
t.Parallel()
r := NewMemoryRouter(map[string][]netip.AddrPort{}, netip.AddrPort{})
isReady, err := r.Ready(t.Context())
require.NoError(t, err)
require.False(t, isReady)
err = r.Advertise(t.Context(), []string{"foo"})
require.NoError(t, err)
isReady, err = r.Ready(t.Context())
require.NoError(t, err)
require.True(t, isReady)
r.Add("foo", netip.MustParseAddrPort("127.0.0.1:9090"))
peerCh, err := r.Resolve(t.Context(), "foo", 2)
require.NoError(t, err)
peers := []netip.AddrPort{}
for peer := range peerCh {
peers = append(peers, peer)
}
require.Len(t, peers, 2)
peers, ok := r.Lookup("foo")
require.True(t, ok)
require.Len(t, peers, 2)
peerCh, err = r.Resolve(t.Context(), "bar", 1)
require.NoError(t, err)
time.Sleep(1 * time.Second)
select {
case <-peerCh:
default:
t.Error("expected peer channel to be closed")
}
_, ok = r.Lookup("bar")
require.False(t, ok)
}

pkg/routing/mock.go
View File

@ -0,0 +1,60 @@
package routing
import (
"context"
"net/netip"
"sync"
)
type MockRouter struct {
resolver map[string][]netip.AddrPort
self netip.AddrPort
mx sync.RWMutex
}
func NewMockRouter(resolver map[string][]netip.AddrPort, self netip.AddrPort) *MockRouter {
return &MockRouter{
resolver: resolver,
self: self,
}
}
func (m *MockRouter) Ready() (bool, error) {
m.mx.RLock()
defer m.mx.RUnlock()
return len(m.resolver) > 0, nil
}
func (m *MockRouter) Resolve(ctx context.Context, key string, allowSelf bool, count int) (<-chan netip.AddrPort, error) {
peerCh := make(chan netip.AddrPort, count)
peers, ok := m.resolver[key]
// A key that is not found leaves the channel open, so the caller waits until its timeout.
if !ok {
return peerCh, nil
}
go func() {
m.mx.RLock()
defer m.mx.RUnlock()
for _, peer := range peers {
peerCh <- peer
}
close(peerCh)
}()
return peerCh, nil
}
func (m *MockRouter) Advertise(ctx context.Context, keys []string) error {
m.mx.Lock()
defer m.mx.Unlock()
for _, key := range keys {
m.resolver[key] = []netip.AddrPort{m.self}
}
return nil
}
func (m *MockRouter) LookupKey(key string) ([]netip.AddrPort, bool) {
m.mx.RLock()
defer m.mx.RUnlock()
v, ok := m.resolver[key]
return v, ok
}
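A hedged sketch of how this mock might be exercised in a test of its own: seed it, advertise a key so the router's self address is registered, then drain the channel returned by Resolve. The key and address below are placeholders, not values from the change set.

package routing_test

import (
	"context"
	"net/netip"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/spegel-org/spegel/pkg/routing"
)

func TestMockRouterSketch(t *testing.T) {
	self := netip.MustParseAddrPort("127.0.0.1:5001")
	router := routing.NewMockRouter(map[string][]netip.AddrPort{}, self)

	// Advertising a key registers the router's own address for it.
	err := router.Advertise(context.Background(), []string{"sha256:example"})
	require.NoError(t, err)

	peerCh, err := router.Resolve(context.Background(), "sha256:example", true, 1)
	require.NoError(t, err)
	peers := []netip.AddrPort{}
	for peer := range peerCh {
		peers = append(peers, peer)
	}
	require.Equal(t, []netip.AddrPort{self}, peers)
}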

View File

@ -2,16 +2,10 @@ package routing
import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"net"
"net/netip"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -20,10 +14,8 @@ import (
cid "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/sec"
"github.com/libp2p/go-libp2p/p2p/discovery/routing"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
@ -36,41 +28,6 @@ import (
const KeyTTL = 10 * time.Minute
type P2PRouterConfig struct {
DataDir string
Libp2pOpts []libp2p.Option
}
func (cfg *P2PRouterConfig) Apply(opts ...P2PRouterOption) error {
for _, opt := range opts {
if opt == nil {
continue
}
if err := opt(cfg); err != nil {
return err
}
}
return nil
}
type P2PRouterOption func(cfg *P2PRouterConfig) error
func WithLibP2POptions(opts ...libp2p.Option) P2PRouterOption {
return func(cfg *P2PRouterConfig) error {
cfg.Libp2pOpts = opts
return nil
}
}
func WithDataDir(dataDir string) P2PRouterOption {
return func(cfg *P2PRouterConfig) error {
cfg.DataDir = dataDir
return nil
}
}
var _ Router = &P2PRouter{}
type P2PRouter struct {
bootstrapper Bootstrapper
host host.Host
@ -79,13 +36,7 @@ type P2PRouter struct {
registryPort uint16
}
func NewP2PRouter(ctx context.Context, addr string, bs Bootstrapper, registryPortStr string, opts ...P2PRouterOption) (*P2PRouter, error) {
cfg := P2PRouterConfig{}
err := cfg.Apply(opts...)
if err != nil {
return nil, err
}
func NewP2PRouter(ctx context.Context, addr string, bootstrapper Bootstrapper, registryPortStr string, opts ...libp2p.Option) (*P2PRouter, error) {
registryPort, err := strconv.ParseUint(registryPortStr, 10, 16)
if err != nil {
return nil, err
@ -115,20 +66,12 @@ func NewP2PRouter(ctx context.Context, addr string, bs Bootstrapper, registryPor
}
return nil
})
libp2pOpts := []libp2p.Option{
opts = append(opts,
libp2p.ListenAddrs(multiAddrs...),
libp2p.PrometheusRegisterer(metrics.DefaultRegisterer),
addrFactoryOpt,
}
if cfg.DataDir != "" {
peerKey, err := loadOrCreatePrivateKey(ctx, cfg.DataDir)
if err != nil {
return nil, err
}
libp2pOpts = append(libp2pOpts, libp2p.Identity(peerKey))
}
libp2pOpts = append(libp2pOpts, cfg.Libp2pOpts...)
host, err := libp2p.New(libp2pOpts...)
)
host, err := libp2p.New(opts...)
if err != nil {
return nil, fmt.Errorf("could not create host: %w", err)
}
@ -140,12 +83,25 @@ func NewP2PRouter(ctx context.Context, addr string, bs Bootstrapper, registryPor
return nil, fmt.Errorf("expected single host address but got %d %s", len(addrs), strings.Join(addrs, ", "))
}
log := logr.FromContextOrDiscard(ctx).WithName("p2p")
bootstrapPeerOpt := dht.BootstrapPeersFunc(func() []peer.AddrInfo {
addrInfo, err := bootstrapper.Get()
if err != nil {
log.Error(err, "could not get bootstrap addresses")
return nil
}
if addrInfo.ID == host.ID() {
log.Info("leader is self skipping connection to bootstrap node")
return nil
}
return []peer.AddrInfo{*addrInfo}
})
dhtOpts := []dht.Option{
dht.Mode(dht.ModeServer),
dht.ProtocolPrefix("/spegel"),
dht.DisableValues(),
dht.MaxRecordAge(KeyTTL),
dht.BootstrapPeersFunc(bootstrapFunc(ctx, bs, host)),
bootstrapPeerOpt,
}
kdht, err := dht.New(ctx, host, dhtOpts...)
if err != nil {
@ -154,7 +110,7 @@ func NewP2PRouter(ctx context.Context, addr string, bs Bootstrapper, registryPor
rd := routing.NewRoutingDiscovery(kdht)
return &P2PRouter{
bootstrapper: bs,
bootstrapper: bootstrapper,
host: host,
kdht: kdht,
rd: rd,
@ -162,53 +118,38 @@ func NewP2PRouter(ctx context.Context, addr string, bs Bootstrapper, registryPor
}, nil
}
func (r *P2PRouter) Run(ctx context.Context) (err error) {
func (r *P2PRouter) Run(ctx context.Context) error {
self := fmt.Sprintf("%s/p2p/%s", r.host.Addrs()[0].String(), r.host.ID().String())
logr.FromContextOrDiscard(ctx).WithName("p2p").Info("starting p2p router", "id", self)
if err := r.kdht.Bootstrap(ctx); err != nil {
return fmt.Errorf("could not bootstrap distributed hash table: %w", err)
return fmt.Errorf("could not boostrap distributed hash table: %w", err)
}
defer func() {
cerr := r.host.Close()
if cerr != nil {
err = errors.Join(err, cerr)
}
}()
err = r.bootstrapper.Run(ctx, self)
err := r.bootstrapper.Run(ctx, self)
if err != nil {
return err
}
return nil
}
func (r *P2PRouter) Ready(ctx context.Context) (bool, error) {
addrInfos, err := r.bootstrapper.Get(ctx)
if err != nil {
return false, err
}
if len(addrInfos) == 0 {
return false, nil
}
if len(addrInfos) == 1 {
matches, err := hostMatches(*host.InfoFromHost(r.host), addrInfos[0])
if err != nil {
return false, err
}
if matches {
return true, nil
}
}
if r.kdht.RoutingTable().Size() > 0 {
return true, nil
}
err = r.kdht.Bootstrap(ctx)
if err != nil {
return false, err
}
return false, nil
func (r *P2PRouter) Close() error {
return r.host.Close()
}
func (r *P2PRouter) Resolve(ctx context.Context, key string, count int) (<-chan netip.AddrPort, error) {
func (r *P2PRouter) Ready() (bool, error) {
addrInfo, err := r.bootstrapper.Get()
if err != nil {
return false, err
}
if addrInfo.ID == r.host.ID() {
return true, nil
}
if r.kdht.RoutingTable().Size() == 0 {
return false, nil
}
return true, nil
}
func (r *P2PRouter) Resolve(ctx context.Context, key string, allowSelf bool, count int) (<-chan netip.AddrPort, error) {
log := logr.FromContextOrDiscard(ctx).WithValues("host", r.host.ID().String(), "key", key)
c, err := createCid(key)
if err != nil {
@ -220,36 +161,34 @@ func (r *P2PRouter) Resolve(ctx context.Context, key string, count int) (<-chan
if peerBufferSize == 0 {
peerBufferSize = 20
}
addrInfoCh := r.rd.FindProvidersAsync(ctx, c, count)
addrCh := r.rd.FindProvidersAsync(ctx, c, count)
peerCh := make(chan netip.AddrPort, peerBufferSize)
go func() {
resolveTimer := prometheus.NewTimer(metrics.ResolveDurHistogram.WithLabelValues("libp2p"))
for addrInfo := range addrInfoCh {
for info := range addrCh {
resolveTimer.ObserveDuration()
if len(addrInfo.Addrs) != 1 {
if !allowSelf && info.ID == r.host.ID() {
continue
}
if len(info.Addrs) != 1 {
addrs := []string{}
for _, addr := range addrInfo.Addrs {
for _, addr := range info.Addrs {
addrs = append(addrs, addr.String())
}
log.Info("expected address list to only contain a single item", "addresses", strings.Join(addrs, ", "))
continue
}
ip, err := manet.ToIP(addrInfo.Addrs[0])
ipAddr, err := ipInMultiaddr(info.Addrs[0])
if err != nil {
log.Error(err, "could not get IP address")
continue
}
ipAddr, ok := netip.AddrFromSlice(ip)
if !ok {
log.Error(errors.New("IP is not IPV4 or IPV6"), "could not convert IP")
continue
}
peer := netip.AddrPortFrom(ipAddr, r.registryPort)
// Don't block if the client has disconnected before reading all values from the channel
select {
case peerCh <- peer:
default:
log.V(4).Info("mirror endpoint dropped: peer channel is full")
log.V(10).Info("mirror endpoint dropped: peer channel is full")
}
}
close(peerCh)
@ -258,7 +197,7 @@ func (r *P2PRouter) Resolve(ctx context.Context, key string, count int) (<-chan
}
func (r *P2PRouter) Advertise(ctx context.Context, keys []string) error {
logr.FromContextOrDiscard(ctx).V(4).Info("advertising keys", "host", r.host.ID().String(), "keys", keys)
logr.FromContextOrDiscard(ctx).V(10).Info("advertising keys", "host", r.host.ID().String(), "keys", keys)
for _, key := range keys {
c, err := createCid(key)
if err != nil {
@ -272,86 +211,6 @@ func (r *P2PRouter) Advertise(ctx context.Context, keys []string) error {
return nil
}
func bootstrapFunc(ctx context.Context, bootstrapper Bootstrapper, h host.Host) func() []peer.AddrInfo {
log := logr.FromContextOrDiscard(ctx).WithName("p2p")
return func() []peer.AddrInfo {
bootstrapCtx, bootstrapCancel := context.WithTimeout(context.Background(), 10*time.Second)
defer bootstrapCancel()
// TODO (phillebaba): Consider if we should do a best effort bootstrap without host address.
hostAddrs := h.Addrs()
if len(hostAddrs) == 0 {
return nil
}
var hostPort ma.Component
ma.ForEach(hostAddrs[0], func(c ma.Component) bool {
if c.Protocol().Code == ma.P_TCP {
hostPort = c
return false
}
return true
})
addrInfos, err := bootstrapper.Get(bootstrapCtx)
if err != nil {
log.Error(err, "could not get bootstrap addresses")
return nil
}
filteredAddrInfos := []peer.AddrInfo{}
for _, addrInfo := range addrInfos {
// Skip addresses that match host.
matches, err := hostMatches(*host.InfoFromHost(h), addrInfo)
if err != nil {
log.Error(err, "could not compare host with address")
continue
}
if matches {
log.Info("skipping bootstrap peer that is same as host")
continue
}
// Add port to address if it is missing.
modifiedAddrs := []ma.Multiaddr{}
for _, addr := range addrInfo.Addrs {
hasPort := false
ma.ForEach(addr, func(c ma.Component) bool {
if c.Protocol().Code == ma.P_TCP {
hasPort = true
return false
}
return true
})
if hasPort {
modifiedAddrs = append(modifiedAddrs, addr)
continue
}
modifiedAddrs = append(modifiedAddrs, ma.Join(addr, &hostPort))
}
addrInfo.Addrs = modifiedAddrs
// Resolve ID if it is missing.
if addrInfo.ID != "" {
filteredAddrInfos = append(filteredAddrInfos, addrInfo)
continue
}
addrInfo.ID = "id"
err = h.Connect(bootstrapCtx, addrInfo)
var mismatchErr sec.ErrPeerIDMismatch
if !errors.As(err, &mismatchErr) {
log.Error(err, "could not get peer id")
continue
}
addrInfo.ID = mismatchErr.Actual
filteredAddrInfos = append(filteredAddrInfos, addrInfo)
}
if len(filteredAddrInfos) == 0 {
log.Info("no bootstrap nodes found")
return nil
}
return filteredAddrInfos
}
}
func listenMultiaddrs(addr string) ([]ma.Multiaddr, error) {
h, p, err := net.SplitHostPort(addr)
if err != nil {
@ -386,6 +245,24 @@ func listenMultiaddrs(addr string) ([]ma.Multiaddr, error) {
return multiAddrs, nil
}
func ipInMultiaddr(multiAddr ma.Multiaddr) (netip.Addr, error) {
for _, p := range []int{ma.P_IP6, ma.P_IP4} {
v, err := multiAddr.ValueForProtocol(p)
if errors.Is(err, ma.ErrProtocolNotFound) {
continue
}
if err != nil {
return netip.Addr{}, err
}
ipAddr, err := netip.ParseAddr(v)
if err != nil {
return netip.Addr{}, err
}
return ipAddr, nil
}
return netip.Addr{}, fmt.Errorf("IP not found in address")
}
func isIp6(m ma.Multiaddr) bool {
c, _ := ma.SplitFirst(m)
if c == nil || c.Protocol().Code != ma.P_IP6 {
@ -407,83 +284,3 @@ func createCid(key string) (cid.Cid, error) {
}
return c, nil
}
func hostMatches(host, addrInfo peer.AddrInfo) (bool, error) {
// Skip self when address ID matches host ID.
if host.ID != "" && addrInfo.ID != "" {
return host.ID == addrInfo.ID, nil
}
// Skip self when IP matches
hostIP, err := manet.ToIP(host.Addrs[0])
if err != nil {
return false, err
}
for _, addr := range addrInfo.Addrs {
addrIP, err := manet.ToIP(addr)
if err != nil {
return false, err
}
if hostIP.Equal(addrIP) {
return true, nil
}
}
return false, nil
}
func loadOrCreatePrivateKey(ctx context.Context, dataDir string) (crypto.PrivKey, error) {
keyPath := filepath.Join(dataDir, "private.key")
log := logr.FromContextOrDiscard(ctx).WithValues("path", keyPath)
err := os.MkdirAll(dataDir, 0o755)
if err != nil {
return nil, err
}
b, err := os.ReadFile(keyPath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, err
}
if errors.Is(err, os.ErrNotExist) {
log.Info("creating a new private key")
privKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
if err != nil {
return nil, err
}
rawBytes, err := privKey.Raw()
if err != nil {
return nil, err
}
pkcs8Bytes, err := x509.MarshalPKCS8PrivateKey(ed25519.PrivateKey(rawBytes))
if err != nil {
return nil, err
}
block := &pem.Block{
Type: "PRIVATE KEY",
Bytes: pkcs8Bytes,
}
pemData := pem.EncodeToMemory(block)
err = os.WriteFile(keyPath, pemData, 0o600)
if err != nil {
return nil, err
}
return privKey, nil
}
log.Info("loading the private key from data directory")
block, _ := pem.Decode(b)
if block == nil || block.Type != "PRIVATE KEY" {
return nil, fmt.Errorf("invalid PEM block type %s", block.Type)
}
parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
return nil, err
}
edKey, ok := parsedKey.(ed25519.PrivateKey)
if !ok {
return nil, errors.New("not an Ed25519 private key")
}
privKey, err := crypto.UnmarshalEd25519PrivateKey(edKey)
if err != nil {
return nil, err
}
return privKey, nil
}

View File

@ -1,184 +1,14 @@
package routing
import (
"context"
"fmt"
"net/netip"
"testing"
"time"
"github.com/go-logr/logr"
tlog "github.com/go-logr/logr/testing"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestP2PRouterOptions(t *testing.T) {
t.Parallel()
libp2pOpts := []libp2p.Option{
libp2p.ListenAddrStrings("foo"),
}
opts := []P2PRouterOption{
WithLibP2POptions(libp2pOpts...),
WithDataDir("foobar"),
}
cfg := P2PRouterConfig{}
err := cfg.Apply(opts...)
require.NoError(t, err)
require.Equal(t, libp2pOpts, cfg.Libp2pOpts)
require.Equal(t, "foobar", cfg.DataDir)
}
func TestP2PRouter(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(t.Context())
bs := NewStaticBootstrapper(nil)
router, err := NewP2PRouter(ctx, "localhost:0", bs, "9090")
require.NoError(t, err)
g, gCtx := errgroup.WithContext(ctx)
g.Go(func() error {
return router.Run(gCtx)
})
// TODO (phillebaba): There is a test flake that sometimes occurs if code runs too fast.
// Flake results in a peer being returned without an address. Revisit in Go 1.24 to see if this can be solved better.
time.Sleep(1 * time.Second)
err = router.Advertise(ctx, nil)
require.NoError(t, err)
peerCh, err := router.Resolve(ctx, "foo", 1)
require.NoError(t, err)
peer := <-peerCh
require.False(t, peer.IsValid())
err = router.Advertise(ctx, []string{"foo"})
require.NoError(t, err)
peerCh, err = router.Resolve(ctx, "foo", 1)
require.NoError(t, err)
peer = <-peerCh
require.True(t, peer.IsValid())
cancel()
err = g.Wait()
require.NoError(t, err)
}
func TestReady(t *testing.T) {
t.Parallel()
bs := NewStaticBootstrapper(nil)
router, err := NewP2PRouter(t.Context(), "localhost:0", bs, "9090")
require.NoError(t, err)
// Should not be ready if no peers are found.
isReady, err := router.Ready(t.Context())
require.NoError(t, err)
require.False(t, isReady)
// Should be ready if only peer is host.
bs.SetPeers([]peer.AddrInfo{*host.InfoFromHost(router.host)})
isReady, err = router.Ready(t.Context())
require.NoError(t, err)
require.True(t, isReady)
// Should not be ready with multiple peers but an empty routing table.
bs.SetPeers([]peer.AddrInfo{{}, {}})
isReady, err = router.Ready(t.Context())
require.NoError(t, err)
require.False(t, isReady)
// Should be ready with multiple peers and populated routing table.
newPeer, err := router.kdht.RoutingTable().GenRandPeerID(0)
require.NoError(t, err)
ok, err := router.kdht.RoutingTable().TryAddPeer(newPeer, false, false)
require.NoError(t, err)
require.True(t, ok)
bs.SetPeers([]peer.AddrInfo{{}, {}})
isReady, err = router.Ready(t.Context())
require.NoError(t, err)
require.True(t, isReady)
}
func TestBootstrapFunc(t *testing.T) {
t.Parallel()
log := tlog.NewTestLogger(t)
ctx := logr.NewContext(t.Context(), log)
mn, err := mocknet.WithNPeers(2)
require.NoError(t, err)
tests := []struct {
name string
peers []peer.AddrInfo
expected []string
}{
{
name: "no peers",
peers: []peer.AddrInfo{},
expected: []string{},
},
{
name: "nothing missing",
peers: []peer.AddrInfo{
{
ID: "foo",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1/tcp/8080")},
},
},
expected: []string{"/ip4/192.168.1.1/tcp/8080/p2p/foo"},
},
{
name: "only self",
peers: []peer.AddrInfo{
{
ID: mn.Hosts()[0].ID(),
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1/tcp/8080")},
},
},
expected: []string{},
},
{
name: "missing port",
peers: []peer.AddrInfo{
{
ID: "foo",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1")},
},
},
expected: []string{"/ip4/192.168.1.1/tcp/4242/p2p/foo"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
bs := NewStaticBootstrapper(tt.peers)
f := bootstrapFunc(ctx, bs, mn.Hosts()[0])
peers := f()
peerStrs := []string{}
for _, p := range peers {
id, err := p.ID.Marshal()
require.NoError(t, err)
peerStrs = append(peerStrs, fmt.Sprintf("%s/p2p/%s", p.Addrs[0].String(), string(id)))
}
require.ElementsMatch(t, tt.expected, peerStrs)
})
}
}
func TestListenMultiaddrs(t *testing.T) {
t.Parallel()
tests := []struct {
name string
addr string
@ -202,11 +32,8 @@ func TestListenMultiaddrs(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
multiAddrs, err := listenMultiaddrs(tt.addr)
require.NoError(t, err)
//nolint: testifylint // This is easier to read and understand.
require.Equal(t, len(tt.expected), len(multiAddrs))
for i, e := range tt.expected {
require.Equal(t, e, multiAddrs[i].String())
@ -215,9 +42,35 @@ func TestListenMultiaddrs(t *testing.T) {
}
}
func TestIsIp6(t *testing.T) {
t.Parallel()
func TestIPInMultiaddr(t *testing.T) {
tests := []struct {
ma string
expected netip.Addr
name string
}{
{
name: "ipv4",
ma: "/ip4/10.244.1.2/tcp/5001",
expected: netip.MustParseAddr("10.244.1.2"),
},
{
name: "ipv6",
ma: "/ip6/0:0:0:0:0:ffff:0af4:0102/tcp/5001",
expected: netip.MustParseAddr("::ffff:10.244.1.2"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
multiAddr, err := ma.NewMultiaddr(tt.ma)
require.NoError(t, err)
v, err := ipInMultiaddr(multiAddr)
require.NoError(t, err)
require.Equal(t, tt.expected, v)
})
}
}
func TestIsIp6(t *testing.T) {
m, err := ma.NewMultiaddr("/ip6/::")
require.NoError(t, err)
require.True(t, isIp6(m))
@ -225,110 +78,3 @@ func TestIsIp6(t *testing.T) {
require.NoError(t, err)
require.False(t, isIp6(m))
}
func TestCreateCid(t *testing.T) {
t.Parallel()
c, err := createCid("foobar")
require.NoError(t, err)
require.Equal(t, "bafkreigdvoh7cnza5cwzar65hfdgwpejotszfqx2ha6uuolaofgk54ge6i", c.String())
}
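For reference, one way to reproduce a CID like the one asserted above, using the same go-cid library together with go-multihash. That createCid hashes the key with SHA-256 and wraps it in a raw-codec CIDv1 is an assumption inferred from the bafkrei prefix, not something this diff confirms.

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// Hash an arbitrary key and wrap it in a CIDv1 with the raw codec.
	// The printed string should resemble the value asserted in TestCreateCid,
	// assuming SHA-256 and the raw codec (an assumption, see the lead-in).
	hash, err := mh.Sum([]byte("foobar"), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	c := cid.NewCidV1(cid.Raw, hash)
	fmt.Println(c.String())
}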
func TestHostMatches(t *testing.T) {
t.Parallel()
tests := []struct {
name string
host peer.AddrInfo
addrInfo peer.AddrInfo
expected bool
}{
{
name: "ID match",
host: peer.AddrInfo{
ID: "foo",
Addrs: []ma.Multiaddr{},
},
addrInfo: peer.AddrInfo{
ID: "foo",
Addrs: []ma.Multiaddr{},
},
expected: true,
},
{
name: "ID do not match",
host: peer.AddrInfo{
ID: "foo",
Addrs: []ma.Multiaddr{},
},
addrInfo: peer.AddrInfo{
ID: "bar",
Addrs: []ma.Multiaddr{},
},
expected: false,
},
{
name: "IP4 match",
host: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1")},
},
addrInfo: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1")},
},
expected: true,
},
{
name: "IP4 do not match",
host: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.1")},
},
addrInfo: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip4/192.168.1.2")},
},
expected: false,
},
{
name: "IP6 match",
host: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip6/c3c9:152b:73d1:dad0:e2f9:a521:6356:88ba")},
},
addrInfo: peer.AddrInfo{
ID: "",
Addrs: []ma.Multiaddr{ma.StringCast("/ip6/c3c9:152b:73d1:dad0:e2f9:a521:6356:88ba")},
},
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
matches, err := hostMatches(tt.host, tt.addrInfo)
require.NoError(t, err)
require.Equal(t, tt.expected, matches)
})
}
}
func TestLoadOrCreatePrivateKey(t *testing.T) {
t.Parallel()
tmpDir := t.TempDir()
data := []byte("hello world")
firstPrivKey, err := loadOrCreatePrivateKey(t.Context(), tmpDir)
require.NoError(t, err)
sig, err := firstPrivKey.Sign(data)
require.NoError(t, err)
secondPrivKey, err := loadOrCreatePrivateKey(t.Context(), tmpDir)
require.NoError(t, err)
ok, err := secondPrivKey.GetPublic().Verify(data, sig)
require.NoError(t, err)
require.True(t, ok)
require.True(t, firstPrivKey.Equals(secondPrivKey))
}

View File

@ -5,12 +5,8 @@ import (
"net/netip"
)
// Router implements the discovery of content.
type Router interface {
// Ready returns true when the router is ready.
Ready(ctx context.Context) (bool, error)
// Resolve asynchronously discovers addresses that can serve the content defined by the give key.
Resolve(ctx context.Context, key string, count int) (<-chan netip.AddrPort, error)
// Advertise broadcasts that the current router can serve the content.
Ready() (bool, error)
Resolve(ctx context.Context, key string, allowSelf bool, count int) (<-chan netip.AddrPort, error)
Advertise(ctx context.Context, keys []string) error
}

View File

@ -3,6 +3,7 @@ package state
import (
"context"
"errors"
"fmt"
"time"
"github.com/go-logr/logr"
@ -13,107 +14,114 @@ import (
"github.com/spegel-org/spegel/pkg/routing"
)
func Track(ctx context.Context, ociStore oci.Store, router routing.Router, resolveLatestTag bool) error {
func Track(ctx context.Context, ociClient oci.Client, router routing.Router, resolveLatestTag bool) error {
log := logr.FromContextOrDiscard(ctx)
eventCh, err := ociStore.Subscribe(ctx)
if err != nil {
return err
}
immediateCh := make(chan time.Time, 1)
immediateCh <- time.Now()
close(immediateCh)
eventCh, errCh := ociClient.Subscribe(ctx)
immediate := make(chan time.Time, 1)
immediate <- time.Now()
expirationTicker := time.NewTicker(routing.KeyTTL - time.Minute)
defer expirationTicker.Stop()
tickerCh := channel.Merge(immediateCh, expirationTicker.C)
ticker := channel.Merge(immediate, expirationTicker.C)
for {
select {
case <-ctx.Done():
return nil
case <-tickerCh:
log.Info("running state update")
err := tick(ctx, ociStore, router, resolveLatestTag)
if err != nil {
case <-ticker:
log.Info("running scheduled image state update")
if err := all(ctx, ociClient, router, resolveLatestTag); err != nil {
log.Error(err, "received errors when updating all images")
continue
}
case event, ok := <-eventCh:
if !ok {
return errors.New("event channel closed")
return errors.New("image event channel closed")
}
log.Info("OCI event", "key", event.Key, "type", event.Type)
err := handle(ctx, router, event)
if err != nil {
log.Error(err, "could not handle event")
log.Info("received image event", "image", event.Image, "type", event.Type)
if _, err := update(ctx, ociClient, router, event, false, resolveLatestTag); err != nil {
log.Error(err, "received error when updating image")
continue
}
case err, ok := <-errCh:
if !ok {
return errors.New("image error channel closed")
}
log.Error(err, "event channel error")
}
}
}
func tick(ctx context.Context, ociStore oci.Store, router routing.Router, resolveLatest bool) error {
advertisedImages := map[string]float64{}
advertisedImageDigests := map[string]float64{}
advertisedImageTags := map[string]float64{}
advertisedKeys := map[string]float64{}
imgs, err := ociStore.ListImages(ctx)
func all(ctx context.Context, ociClient oci.Client, router routing.Router, resolveLatestTag bool) error {
log := logr.FromContextOrDiscard(ctx).V(5)
imgs, err := ociClient.ListImages(ctx)
if err != nil {
return err
}
// TODO: Update metrics on subscribed events. This will require keeping state in memory to know about key count changes.
metrics.AdvertisedKeys.Reset()
metrics.AdvertisedImages.Reset()
metrics.AdvertisedImageTags.Reset()
metrics.AdvertisedImageDigests.Reset()
errs := []error{}
targets := map[string]interface{}{}
for _, img := range imgs {
advertisedImages[img.Registry] += 1
advertisedImageDigests[img.Registry] += 1
if !resolveLatest && img.IsLatestTag() {
_, skipDigests := targets[img.Digest.String()]
// Handle the list re-sync as update events; this will also prevent the
// update function from setting metrics values.
event := oci.ImageEvent{Image: img, Type: oci.UpdateEvent}
log.Info("sync image event", "image", event.Image, "type", event.Type)
keyTotal, err := update(ctx, ociClient, router, event, skipDigests, resolveLatestTag)
if err != nil {
errs = append(errs, err)
continue
}
tagName, ok := img.TagName()
if !ok {
continue
}
err := router.Advertise(ctx, []string{tagName})
if err != nil {
return err
}
advertisedImageTags[img.Registry] += 1
advertisedKeys[img.Registry] += 1
}
contents, err := ociStore.ListContents(ctx)
if err != nil {
return err
}
for _, content := range contents {
err := router.Advertise(ctx, []string{content.Digest.String()})
if err != nil {
return err
}
for _, registry := range content.Registires {
advertisedKeys[registry] += 1
targets[img.Digest.String()] = nil
metrics.AdvertisedKeys.WithLabelValues(img.Registry).Add(float64(keyTotal))
metrics.AdvertisedImages.WithLabelValues(img.Registry).Add(1)
if img.Tag == "" {
metrics.AdvertisedImageDigests.WithLabelValues(event.Image.Registry).Add(1)
} else {
metrics.AdvertisedImageTags.WithLabelValues(event.Image.Registry).Add(1)
}
}
for k, v := range advertisedImages {
metrics.AdvertisedImages.WithLabelValues(k).Set(v)
}
for k, v := range advertisedImageDigests {
metrics.AdvertisedImageDigests.WithLabelValues(k).Set(v)
}
for k, v := range advertisedImageTags {
metrics.AdvertisedImageTags.WithLabelValues(k).Set(v)
}
for k, v := range advertisedKeys {
metrics.AdvertisedKeys.WithLabelValues(k).Set(v)
}
return nil
return errors.Join(errs...)
}
func handle(ctx context.Context, router routing.Router, event oci.OCIEvent) error {
if event.Type != oci.CreateEvent {
return nil
func update(ctx context.Context, ociClient oci.Client, router routing.Router, event oci.ImageEvent, skipDigests, resolveLatestTag bool) (int, error) {
keys := []string{}
if !(!resolveLatestTag && event.Image.IsLatestTag()) {
if tagRef, ok := event.Image.TagName(); ok {
keys = append(keys, tagRef)
}
}
err := router.Advertise(ctx, []string{event.Key})
if event.Type == oci.DeleteEvent {
// We don't know how many digest keys were associated with the deleted image;
// that can only be updated by the full image list sync in all().
metrics.AdvertisedImages.WithLabelValues(event.Image.Registry).Sub(1)
// The DHT doesn't actually have any way to stop providing a key; you just have to wait for the record to expire
// from the datastore. The record TTL is a datastore-level value, so we can't even re-provide with a shorter TTL.
return 0, nil
}
if !skipDigests {
dgsts, err := ociClient.AllIdentifiers(ctx, event.Image)
if err != nil {
return 0, fmt.Errorf("could not get digests for image %s: %w", event.Image.String(), err)
}
keys = append(keys, dgsts...)
}
err := router.Advertise(ctx, keys)
if err != nil {
return err
return 0, fmt.Errorf("could not advertise image %s: %w", event.Image.String(), err)
}
return nil
if event.Type == oci.CreateEvent {
// We don't know how many unique digest keys will be associated with the new image;
// that can only be updated by the full image list sync in all().
metrics.AdvertisedImages.WithLabelValues(event.Image.Registry).Add(1)
if event.Image.Tag == "" {
metrics.AdvertisedImageDigests.WithLabelValues(event.Image.Registry).Add(1)
} else {
metrics.AdvertisedImageTags.WithLabelValues(event.Image.Registry).Add(1)
}
}
return len(keys), nil
}

View File

@ -2,60 +2,17 @@ package state
import (
"context"
"crypto/sha256"
"encoding/json"
"math/rand/v2"
"net/netip"
"strconv"
"testing"
"time"
"golang.org/x/sync/errgroup"
"github.com/go-logr/logr"
tlog "github.com/go-logr/logr/testing"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/require"
"github.com/spegel-org/spegel/pkg/oci"
"github.com/spegel-org/spegel/pkg/routing"
)
func TestTrack(t *testing.T) {
t.Parallel()
ociStore := oci.NewMemory()
imgRefs := []string{
"docker.io/library/ubuntu:latest",
"ghcr.io/spegel-org/spegel:v0.0.9",
}
imgs := []oci.Image{}
for _, imageStr := range imgRefs {
manifest := ocispec.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
MediaType: ocispec.MediaTypeImageManifest,
Annotations: map[string]string{
"random": strconv.Itoa(rand.Int()),
},
}
b, err := json.Marshal(&manifest)
require.NoError(t, err)
hash := sha256.New()
_, err = hash.Write(b)
require.NoError(t, err)
dgst := digest.NewDigest(digest.SHA256, hash)
ociStore.AddBlob(b, dgst)
img, err := oci.ParseImageRequireDigest(imageStr, dgst)
require.NoError(t, err)
ociStore.AddImage(img)
imgs = append(imgs, img)
}
func TestBasic(t *testing.T) {
tests := []struct {
name string
resolveLatestTag bool
@ -69,30 +26,41 @@ func TestTrack(t *testing.T) {
resolveLatestTag: false,
},
}
imgRefs := []string{
"docker.io/library/ubuntu:latest@sha256:b060fffe8e1561c9c3e6dea6db487b900100fc26830b9ea2ec966c151ab4c020",
"ghcr.io/spegel-org/spegel:v0.0.9@sha256:fa32bd3bcd49a45a62cfc1b0fed6a0b63bf8af95db5bad7ec22865aee0a4b795",
"docker.io/library/alpine@sha256:25fad2a32ad1f6f510e528448ae1ec69a28ef81916a004d3629874104f8a7f70",
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
imgs := []oci.Image{}
for _, imageStr := range imgRefs {
img, err := oci.Parse(imageStr, "")
require.NoError(t, err)
imgs = append(imgs, img)
}
ociClient := oci.NewMockClient(imgs)
router := routing.NewMockRouter(map[string][]netip.AddrPort{}, netip.MustParseAddrPort("127.0.0.1:5000"))
log := tlog.NewTestLogger(t)
ctx := logr.NewContext(t.Context(), log)
ctx, cancel := context.WithCancel(ctx)
router := routing.NewMemoryRouter(map[string][]netip.AddrPort{}, netip.MustParseAddrPort("127.0.0.1:5000"))
g, gCtx := errgroup.WithContext(ctx)
g.Go(func() error {
return Track(gCtx, ociStore, router, tt.resolveLatestTag)
})
time.Sleep(100 * time.Millisecond)
ctx, cancel := context.WithCancel(context.TODO())
go func() {
time.Sleep(2 * time.Second)
cancel()
}()
err := Track(ctx, ociClient, router, tt.resolveLatestTag)
require.NoError(t, err)
for _, img := range imgs {
peers, ok := router.Lookup(img.Digest.String())
peers, ok := router.LookupKey(img.Digest.String())
require.True(t, ok)
require.Len(t, peers, 1)
tagName, ok := img.TagName()
if !ok {
continue
}
peers, ok = router.Lookup(tagName)
peers, ok = router.LookupKey(tagName)
if img.IsLatestTag() && !tt.resolveLatestTag {
require.False(t, ok)
continue
@ -100,10 +68,6 @@ func TestTrack(t *testing.T) {
require.True(t, ok)
require.Len(t, peers, 1)
}
cancel()
err := g.Wait()
require.NoError(t, err)
})
}
}

48
pkg/throttle/byterate.go Normal file
View File

@ -0,0 +1,48 @@
package throttle
import (
"fmt"
"regexp"
"strconv"
)
var unmarshalRegex = regexp.MustCompile(`^(\d+)\s?([KMGT]?Bps)$`)
type Byterate int64
const (
Bps Byterate = 1
KBps = 1024 * Bps
MBps = 1024 * KBps
GBps = 1024 * MBps
TBps = 1024 * GBps
)
func (br *Byterate) UnmarshalText(b []byte) error {
comps := unmarshalRegex.FindStringSubmatch(string(b))
if len(comps) != 3 {
return fmt.Errorf("invalid byterate format %s should be n Bps, n KBps, n MBps, n GBps, or n TBps", string(b))
}
v, err := strconv.Atoi(comps[1])
if err != nil {
return err
}
unitStr := comps[2]
var unit Byterate
switch unitStr {
case "Bps":
unit = Bps
case "KBps":
unit = KBps
case "MBps":
unit = MBps
case "GBps":
unit = GBps
case "TBps":
unit = TBps
default:
return fmt.Errorf("unknown unit %s", unitStr)
}
*br = Byterate(v) * unit
return nil
}
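
As a small usage sketch (not part of this change set, and assuming the import path of the new pkg/throttle package), the text format parses like this:

```go
package main

import (
	"fmt"

	"github.com/spegel-org/spegel/pkg/throttle"
)

func main() {
	// "100 MBps" matches the regex ^(\d+)\s?([KMGT]?Bps)$,
	// so the value becomes 100 * 1024 * 1024 bytes per second.
	var br throttle.Byterate
	if err := br.UnmarshalText([]byte("100 MBps")); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes per second\n", int64(br))
}
```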

View File

@ -0,0 +1,67 @@
package throttle
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestByterateUnmarshalValid(t *testing.T) {
tests := []struct {
input string
expected Byterate
}{
{
input: "1 Bps",
expected: 1 * Bps,
},
{
input: "31 KBps",
expected: 31 * KBps,
},
{
input: "42 MBps",
expected: 42 * MBps,
},
{
input: "120 GBps",
expected: 120 * GBps,
},
{
input: "3TBps",
expected: 3 * TBps,
},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
var br Byterate
err := br.UnmarshalText([]byte(tt.input))
require.NoError(t, err)
require.Equal(t, tt.expected, br)
})
}
}
func TestByterateUnmarshalInvalid(t *testing.T) {
tests := []struct {
input string
}{
{
input: "foobar",
},
{
input: "1 Mbps",
},
{
input: "1.1 MBps",
},
}
for _, tt := range tests {
t.Run(tt.input, func(t *testing.T) {
var br Byterate
err := br.UnmarshalText([]byte(tt.input))
require.EqualError(t, err, fmt.Sprintf("invalid byterate format %s should be n Bps, n KBps, n MBps, n GBps, or n TBps", tt.input))
})
}
}

48
pkg/throttle/throttle.go Normal file
View File

@ -0,0 +1,48 @@
package throttle
import (
"fmt"
"io"
"time"
"golang.org/x/time/rate"
)
const burstLimit = 1024 * 1024 * 1024 // 1GB
type Throttler struct {
limiter *rate.Limiter
}
func NewThrottler(br Byterate) *Throttler {
limiter := rate.NewLimiter(rate.Limit(br), burstLimit)
limiter.AllowN(time.Now(), burstLimit)
return &Throttler{
limiter: limiter,
}
}
func (t *Throttler) Writer(w io.Writer) io.Writer {
return &writer{
limiter: t.limiter,
writer: w,
}
}
type writer struct {
limiter *rate.Limiter
writer io.Writer
}
func (w *writer) Write(p []byte) (int, error) {
n, err := w.writer.Write(p)
if err != nil {
return 0, err
}
r := w.limiter.ReserveN(time.Now(), n)
if !r.OK() {
return n, fmt.Errorf("write size %d exceeds limiters burst %d", n, w.limiter.Burst())
}
time.Sleep(r.Delay())
return n, nil
}
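
A minimal usage sketch (illustrative only, assuming the pkg/throttle import path): the Throttler wraps any io.Writer, and since the burst is drained at construction, throughput is limited to the configured rate from the first write.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"time"

	"github.com/spegel-org/spegel/pkg/throttle"
)

func main() {
	throttler := throttle.NewThrottler(1 * throttle.MBps)
	dst := &bytes.Buffer{}
	w := throttler.Writer(dst)

	// Copying 2 MB through the throttled writer should take roughly 2 seconds.
	start := time.Now()
	if _, err := io.Copy(w, bytes.NewReader(make([]byte, 2*1024*1024))); err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes in %s\n", dst.Len(), time.Since(start))
}
```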

View File

@ -0,0 +1,26 @@
package throttle
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestThrottler(t *testing.T) {
br := 500 * Bps
throttler := NewThrottler(br)
w := throttler.Writer(bytes.NewBuffer([]byte{}))
chunkSize := 100
start := time.Now()
// 10 writes of 100 bytes at 500 Bps should take roughly 2 seconds,
// since the limiter's burst is drained when the throttler is created.
for i := 0; i < 10; i++ {
b := make([]byte, chunkSize)
n, err := w.Write(b)
require.NoError(t, err)
require.Equal(t, chunkSize, n)
}
d := time.Since(start)
require.Greater(t, d, 2*time.Second)
require.Less(t, d, 3*time.Second)
}

36
test/benchmark/.gitignore vendored Normal file
View File

@ -0,0 +1,36 @@
benchmark.kubeconfig
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc

52
test/benchmark/README.md Normal file
View File

@ -0,0 +1,52 @@
# Benchmark
The benchmark measures image pull performance in realistic scenarios. Its purpose is to validate that Spegel performs as expected and to give an indication of the performance that can be expected.
Spegel works best when deploying multiple replicas of an application, as the same image needs to be pulled to multiple nodes, which allows most of them to pull it from Spegel instead of the external registry.
For this reason, the benchmark consists of two steps. The first step deploys a daemonset to the cluster. During this step all pods are created at once and all images are pulled at the same time.
This tests the worst condition for Spegel, as none of the nodes have the image cached, creating a race to pull it first. However, as soon as the image has been pulled to the first node, the remaining nodes are able to pull it from there.
The second step updates the daemonset with a new version of the image. Pods are replaced one at a time when a daemonset is updated, which is a better scenario for Spegel.
The first node needs to pull the image from the registry, but the following nodes are able to pull it from the first node.
In theory, both of these steps should result in a faster overall image pull time, while pulls that still go to the external registry should take a similar amount of time.
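As a rough illustration of what the two steps boil down to (the manifest name, daemonset name, and image below are placeholders, not what the benchmark tool actually creates), the sequence looks roughly like this:
```bash
# Step 1: create the daemonset; every node pulls the image at the same time.
kubectl apply -f benchmark-daemonset.yaml
kubectl rollout status daemonset/benchmark

# Step 2: roll out a new image; pods are replaced one at a time,
# so later nodes can pull the new image from nodes that already have it.
kubectl set image daemonset/benchmark benchmark=ghcr.io/example/app:new
kubectl rollout status daemonset/benchmark
```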
## Method
This section describes the process of running the benchmarks on an AKS cluster created with the accompanying Terraform configuration. Replace the kubeconfig to run the benchmark on another cluster.
Start by creating the AKS cluster. A kubeconfig file will be created in the terraform directory after the AKS cluster has been successfully created.
```bash
cd terraform
terraform init
terraform apply
cd ..
```
Run the benchmark without Spegel installed. The first Nginx image is small with only a few layers, while the second Plex image is a lot larger. The benchmark will output the path to a directory containing the results.
```bash
go run benchmark.go benchmark --result-dir ./results --name nginx-without-spegel --kubeconfig ./terraform/benchmark.kubeconfig --namespace spegel-benchmark --images ghcr.io/mirrorshub/docker/nginx:1.24-alpine ghcr.io/mirrorshub/docker/nginx:1.25-alpine
go run benchmark.go benchmark --result-dir ./results --name plex-without-spegel --kubeconfig ./terraform/benchmark.kubeconfig --namespace spegel-benchmark --images ghcr.io/linuxserver/plex:1.31.0 ghcr.io/linuxserver/plex:1.32.0
```
Deploy Spegel in the cluster and wait for all of the pods to run.
```bash
export KUBECONFIG=$(pwd)/terraform/benchmark.kubeconfig
helm upgrade --create-namespace --namespace spegel --install --version $VERSION spegel oci://ghcr.io/spegel-org/helm-charts/spegel
kubectl --namespace spegel rollout status daemonset spegel --timeout 60s
```
Run the same benchmarks as before, now with Spegel installed.
```bash
go run benchmark.go benchmark --result-dir ./results --name nginx-with-spegel --kubeconfig ./terraform/benchmark.kubeconfig --namespace spegel-benchmark --images ghcr.io/mirrorshub/docker/nginx:1.24-alpine ghcr.io/mirrorshub/docker/nginx:1.25-alpine
go run benchmark.go benchmark --result-dir ./results --name plex-with-spegel --kubeconfig ./terraform/benchmark.kubeconfig --namespace spegel-benchmark --images ghcr.io/linuxserver/plex:1.31.0 ghcr.io/linuxserver/plex:1.32.0
```
Destroy the AKS cluster as it is no longer needed.
```bash
cd terraform
terraform destroy
cd ..
```

454
test/benchmark/benchmark.go Normal file
View File

@ -0,0 +1,454 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
"path"
"regexp"
"strings"
"syscall"
"time"
"image/color"
"github.com/alexflint/go-arg"
"golang.org/x/exp/slices"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
"gonum.org/v1/plot/vg/draw"
"gonum.org/v1/plot/vg/vgimg"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/cli-utils/pkg/kstatus/status"
)
type BenchmarkCmd struct {
ResultDir string `arg:"--result-dir,required"`
Name string `arg:"--name,required"`
KubeconfigPath string `arg:"--kubeconfig,required"`
Namespace string `arg:"--namespace,required"`
Images []string `arg:"--images,required"`
}
type AnalyzeCmd struct {
Path string `args:"--path"`
}
type Arguments struct {
Benchmark *BenchmarkCmd `arg:"subcommand:benchmark"`
Analyze *AnalyzeCmd `arg:"subcommand:analyze"`
}
func main() {
args := &Arguments{}
arg.MustParse(args)
err := run(*args)
if err != nil {
fmt.Println("unexpected error:", err)
os.Exit(1)
}
}
func run(args Arguments) error {
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM)
defer cancel()
switch {
case args.Benchmark != nil:
return benchmark(ctx, *args.Benchmark)
case args.Analyze != nil:
return analyze(ctx, *args.Analyze)
default:
return fmt.Errorf("unknown command")
}
}
type Result struct {
Name string
Benchmarks []Benchmark
}
type Benchmark struct {
Image string
Measurements []Measurement
}
type Measurement struct {
Start time.Time
Stop time.Time
Duration time.Duration
}
func benchmark(ctx context.Context, args BenchmarkCmd) error {
cfg, err := clientcmd.BuildConfigFromFlags("", args.KubeconfigPath)
if err != nil {
return err
}
cs, err := kubernetes.NewForConfig(cfg)
if err != nil {
return err
}
dc, err := dynamic.NewForConfig(cfg)
if err != nil {
return err
}
ts := time.Now().Unix()
runName := fmt.Sprintf("spegel-benchmark-%d", ts)
_, err = cs.CoreV1().Namespaces().Get(ctx, args.Namespace, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
if errors.IsNotFound(err) {
ns := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: args.Namespace,
},
}
_, err := cs.CoreV1().Namespaces().Create(ctx, &ns, metav1.CreateOptions{})
if err != nil {
return err
}
}
err = clearImages(ctx, cs, dc, args.Namespace, args.Images)
if err != nil {
return err
}
defer func() {
cs.AppsV1().DaemonSets(args.Namespace).Delete(ctx, runName, metav1.DeleteOptions{})
}()
result := Result{
Name: args.Name,
}
for _, image := range args.Images {
bench, err := measureImagePull(ctx, cs, dc, args.Namespace, runName, image)
if err != nil {
return err
}
result.Benchmarks = append(result.Benchmarks, bench)
}
err = clearImages(ctx, cs, dc, args.Namespace, args.Images)
if err != nil {
return err
}
fileName := fmt.Sprintf("%s.json", args.Name)
file, err := os.Create(path.Join(args.ResultDir, fileName))
if err != nil {
return err
}
defer file.Close()
b, err := json.MarshalIndent(result, "", " ")
if err != nil {
return err
}
_, err = file.Write(b)
if err != nil {
return err
}
return nil
}
func clearImages(ctx context.Context, cs kubernetes.Interface, dc dynamic.Interface, namespace string, images []string) error {
remove := fmt.Sprintf("crictl rmi %s || true", strings.Join(images, " "))
commands := []string{"/bin/sh", "-c", fmt.Sprintf("chroot /host /bin/bash -c '%s'; sleep infinity;", remove)}
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "spegel-clear-image",
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "spegel-clear-image"},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": "spegel-clear-image",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "clear",
Image: "docker.io/library/alpine:3.18.4@sha256:48d9183eb12a05c99bcc0bf44a003607b8e941e1d4f41f9ad12bdcc4b5672f86",
ImagePullPolicy: "IfNotPresent",
Command: commands,
Stdin: true,
VolumeMounts: []corev1.VolumeMount{
{
Name: "host-root",
MountPath: "/host",
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "host-root",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/",
},
},
},
},
},
},
},
}
_, err := cs.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
defer func() {
cs.AppsV1().DaemonSets(namespace).Delete(ctx, ds.ObjectMeta.Name, metav1.DeleteOptions{})
}()
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (done bool, err error) {
gvr := schema.GroupVersionResource{
Group: "apps",
Version: "v1",
Resource: "daemonsets",
}
u, err := dc.Resource(gvr).Namespace(namespace).Get(ctx, ds.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
res, err := status.Compute(u)
if err != nil {
return false, err
}
if res.Status != status.CurrentStatus {
return false, nil
}
return true, nil
})
if err != nil {
return err
}
return nil
}
func measureImagePull(ctx context.Context, cs kubernetes.Interface, dc dynamic.Interface, namespace, name, image string) (Benchmark, error) {
ds, err := cs.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return Benchmark{}, err
}
if errors.IsNotFound(err) {
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": name},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": name,
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "benchmark",
Image: image,
ImagePullPolicy: "IfNotPresent",
// Keep container running
Stdin: true,
},
},
},
},
},
}
ds, err := cs.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{})
if err != nil {
return Benchmark{}, err
}
} else {
ds.Spec.Template.Spec.Containers[0].Image = image
_, err := cs.AppsV1().DaemonSets(namespace).Update(ctx, ds, metav1.UpdateOptions{})
if err != nil {
return Benchmark{}, err
}
}
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Minute, true, func(ctx context.Context) (done bool, err error) {
gvr := schema.GroupVersionResource{
Group: "apps",
Version: "v1",
Resource: "daemonsets",
}
u, err := dc.Resource(gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, err
}
res, err := status.Compute(u)
if err != nil {
return false, err
}
if res.Status != status.CurrentStatus {
return false, nil
}
return true, nil
})
if err != nil {
return Benchmark{}, err
}
podList, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", name)})
if err != nil {
return Benchmark{}, err
}
if len(podList.Items) == 0 {
return Benchmark{}, fmt.Errorf("received empty benchmark pod list")
}
bench := Benchmark{
Image: image,
}
for _, pod := range podList.Items {
eventList, err := cs.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("involvedObject.name=%s", pod.Name), TypeMeta: metav1.TypeMeta{Kind: "Pod"}})
if err != nil {
return Benchmark{}, err
}
pullingEvent, err := getEvent(eventList.Items, "Pulling")
if err != nil {
return Benchmark{}, err
}
pulledEvent, err := getEvent(eventList.Items, "Pulled")
if err != nil {
return Benchmark{}, err
}
d, err := parsePullMessage(pulledEvent.Message)
if err != nil {
return Benchmark{}, err
}
bench.Measurements = append(bench.Measurements, Measurement{Start: pullingEvent.FirstTimestamp.Time, Stop: pullingEvent.FirstTimestamp.Time.Add(d), Duration: d})
}
return bench, nil
}
func getEvent(events []corev1.Event, reason string) (corev1.Event, error) {
for _, event := range events {
if event.Reason != reason {
continue
}
return event, nil
}
return corev1.Event{}, fmt.Errorf("could not find event with reason %s", reason)
}
func parsePullMessage(msg string) (time.Duration, error) {
r, err := regexp.Compile(`\((.*) including waiting\)`)
if err != nil {
return 0, err
}
match := r.FindStringSubmatch(msg)
if len(match) < 2 {
return 0, fmt.Errorf("could not find image pull duration")
}
s := match[1]
d, err := time.ParseDuration(s)
if err != nil {
return 0, err
}
return d, nil
}
func analyze(ctx context.Context, args AnalyzeCmd) error {
b, err := os.ReadFile(args.Path)
if err != nil {
return err
}
result := Result{}
err = json.Unmarshal(b, &result)
if err != nil {
return err
}
ext := path.Ext(args.Path)
outPath := strings.TrimSuffix(args.Path, ext)
outPath = fmt.Sprintf("%s.png", outPath)
err = createPlot(result, outPath)
if err != nil {
return err
}
return nil
}
func createPlot(result Result, path string) error {
plots := []*plot.Plot{}
for _, bench := range result.Benchmarks {
p := plot.New()
p.Title.Text = bench.Image
p.Title.Padding = vg.Points(10)
p.Y.Label.Text = "Pod Number"
p.X.Label.Text = "Time [ms]"
slices.SortFunc(bench.Measurements, func(a, b Measurement) int {
if a.Start == b.Start {
return a.Stop.Compare(b.Stop)
}
return a.Start.Compare(b.Start)
})
zeroTime := bench.Measurements[0].Start
max := int64(0)
min := int64(0)
for i, result := range bench.Measurements {
if i == 0 || result.Duration.Milliseconds() < min {
min = result.Duration.Milliseconds()
}
if i == 0 || result.Duration.Milliseconds() > max {
max = result.Duration.Milliseconds()
}
start := result.Start.Sub(zeroTime)
stop := start + result.Duration
b, err := plotter.NewBoxPlot(4, float64(len(bench.Measurements)-i-1), plotter.Values{float64(start.Milliseconds()), float64(stop.Milliseconds())})
if err != nil {
return err
}
b.Horizontal = true
b.FillColor = color.Black
p.Add(b)
}
plots = append(plots, p)
}
img := vgimg.New(vg.Points(700), vg.Points(300))
dc := draw.New(img)
t := draw.Tiles{
Rows: 1,
Cols: len(plots),
PadX: vg.Millimeter,
PadY: vg.Millimeter,
PadTop: vg.Points(10),
PadBottom: vg.Points(10),
PadLeft: vg.Points(10),
PadRight: vg.Points(10),
}
canv := plot.Align([][]*plot.Plot{plots}, t, dc)
for i, plot := range plots {
plot.Draw(canv[0][i])
}
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
png := vgimg.PngCanvas{Canvas: img}
if _, err := png.WriteTo(file); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,27 @@
package main
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestParsePullMessage(t *testing.T) {
s := "Successfully pulled image \"docker.io/library/nginx:mainline-alpine\" in 873.420598ms (873.428863ms including waiting)"
d, err := parsePullMessage(s)
require.NoError(t, err)
require.Equal(t, 873428863*time.Nanosecond, d)
}
func TestCreatePlot(t *testing.T) {
bench := Benchmark{Image: "example"}
for i := 1; i <= 10; i++ {
d, err := time.ParseDuration(fmt.Sprintf("%ds", i))
require.NoError(t, err)
start := time.Now().Add(d)
bench.Measurements = append(bench.Measurements, Measurement{Start: start, Stop: start.Add(d), Duration: d})
}
result := Result{Name: "test", Benchmarks: []Benchmark{bench}}
err := createPlot(result, t.TempDir()+"/result.png")
require.NoError(t, err)
}

64
test/benchmark/go.mod Normal file
View File

@ -0,0 +1,64 @@
module github.com/spegel-org/spegel/test/benchmark
go 1.21.3
require (
github.com/alexflint/go-arg v1.4.3
github.com/stretchr/testify v1.8.4
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
gonum.org/v1/plot v0.14.0
k8s.io/api v0.28.3
k8s.io/apimachinery v0.28.3
k8s.io/client-go v0.28.3
sigs.k8s.io/cli-utils v0.35.0
)
require (
git.sr.ht/~sbinet/gg v0.5.0 // indirect
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect
github.com/alexflint/go-scalar v1.1.0 // indirect
github.com/campoy/embedmd v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/go-fonts/liberation v0.3.1 // indirect
github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-pdf/fpdf v0.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/image v0.11.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

223
test/benchmark/go.sum Normal file
View File

@ -0,0 +1,223 @@
git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo=
git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE=
git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8=
git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw=
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/alexflint/go-arg v1.4.3 h1:9rwwEBpMXfKQKceuZfYcwuc/7YY7tWJbFsgG5cAU/uo=
github.com/alexflint/go-arg v1.4.3/go.mod h1:3PZ/wp/8HuqRZMUUgu7I+e1qcpUbvmS258mRXkFH4IA=
github.com/alexflint/go-scalar v1.1.0 h1:aaAouLLzI9TChcPXotr6gUhq+Scr8rl0P9P4PnltbhM=
github.com/alexflint/go-scalar v1.1.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o=
github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
github.com/go-fonts/latin-modern v0.3.1 h1:/cT8A7uavYKvglYXvrdDw4oS5ZLkcOU22fa2HJ1/JVM=
github.com/go-fonts/latin-modern v0.3.1/go.mod h1:ysEQXnuT/sCDOAONxC7ImeEDVINbltClhasMAqEtRK0=
github.com/go-fonts/liberation v0.3.1 h1:9RPT2NhUpxQ7ukUvz3jeUckmN42T9D9TpjtQcqK/ceM=
github.com/go-fonts/liberation v0.3.1/go.mod h1:jdJ+cqF+F4SUL2V+qxBth8fvBpBDS7yloUL5Fi8GTGY=
github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs=
github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFDDeVRFQwHPvsv9soJVB/iqymhuZQuJ3a9OM=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-pdf/fpdf v0.8.0 h1:IJKpdaagnWUeSkUFUjTcSzTppFxmv8ucGQyNPQWxYOQ=
github.com/go-pdf/fpdf v0.8.0/go.mod h1:gfqhcNwXrsd3XYKte9a7vM3smvU/jB4ZRDrmWSxpfdc=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE=
gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM=
k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc=
k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A=
k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8=
k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4=
k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sigs.k8s.io/cli-utils v0.35.0 h1:dfSJaF1W0frW74PtjwiyoB4cwdRygbHnC7qe7HF0g/Y=
sigs.k8s.io/cli-utils v0.35.0/go.mod h1:ITitykCJxP1vaj1Cew/FZEaVJ2YsTN9Q71m02jebkoE=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

Some files were not shown because too many files have changed in this diff.