1. Install and configure Nginx
1. Prepare a server with unrestricted access to the public internet.
2. Install Nginx.
3. Point a domain at the server, then install the TLS certificate into the Nginx config (a minimal sketch follows).
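A minimal sketch of steps 2-3, assuming a Debian/Ubuntu host and that you already obtained the certificate files (e.g. from certbot or your CA) under the names used below:
# Install nginx (Debian/Ubuntu)
sudo apt-get update && sudo apt-get install -y nginx
# Place the certificate and key where the config below expects them
sudo mkdir -p /etc/nginx/ssl
sudo cp xing.axzys.cn.pem /etc/nginx/ssl/xing.axzys.cn.pem
sudo cp xing.axzys.cn.key /etc/nginx/ssl/xing.axzys.cn.key
sudo chmod 600 /etc/nginx/ssl/xing.axzys.cn.key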
# /etc/nginx/sites-available/docker-mirror
# DNS for variable proxy_pass
resolver 1.1.1.1 8.8.8.8 valid=300s ipv6=off;
# Cache (only used under /v2/)
proxy_cache_path /var/cache/nginx/docker levels=1:2 keys_zone=docker_cache:50m max_size=300g inactive=7d use_temp_path=off;
# Registry v2 header
map $http_docker_distribution_api_version $docker_api_version { default "registry/2.0"; }
# expose cache status
map $upstream_cache_status $cache_status { default $upstream_cache_status; "" "BYPASS"; }
server {
listen 443 ssl http2;
# listen 443 ssl http2 default_server;
server_name xing.axzys.cn;
ssl_certificate /etc/nginx/ssl/xing.axzys.cn.pem;
ssl_certificate_key /etc/nginx/ssl/xing.axzys.cn.key;
client_max_body_size 0;
proxy_http_version 1.1;
proxy_connect_timeout 60s;
proxy_read_timeout 600s;
proxy_send_timeout 600s;
# Stream by default (no buffering)
proxy_buffering off;
proxy_request_buffering off;
proxy_set_header Connection "";
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Docker-Distribution-Api-Version $docker_api_version;
# Enable caching globally (/_proxy and /token switch it off individually)
proxy_cache docker_cache;
proxy_cache_lock on;
proxy_cache_revalidate on;
proxy_cache_min_uses 1;
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
proxy_cache_valid 200 206 302 10m;
add_header X-Cache-Status $cache_status always;
# Rewrite upstream 3xx Location headers to /_proxy/<host>/<path?query>
proxy_redirect ~^https://(?<h>[^/]+)(?<p>/.*)$ https://$server_name/_proxy/$h$p;
# ---------- token endpoint (Docker Hub only) ----------
location = /token {
proxy_pass https://auth.docker.io/token$is_args$args;
proxy_set_header Host auth.docker.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Authorization "";
proxy_cache off;
proxy_buffering off;
proxy_http_version 1.1;
proxy_connect_timeout 30s;
proxy_read_timeout 30s;
proxy_send_timeout 30s;
}
# ---------- GHCR token relay ----------
location = /ghcr-token {
proxy_pass https://ghcr.io/token$is_args$args;
proxy_set_header Host ghcr.io;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Authorization "";
proxy_cache off;
proxy_buffering off;
proxy_http_version 1.1;
proxy_connect_timeout 30s;
proxy_read_timeout 30s;
proxy_send_timeout 30s;
}
# ---------- /v2/ -> Docker Hub ----------
location ^~ /v2/ {
set $upstream_host "registry-1.docker.io";
proxy_set_header Host $upstream_host;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
# Steer clients to our /token endpoint
proxy_hide_header WWW-Authenticate;
add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always;
proxy_buffering off;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# ================= Other registries (prefixed) =================
# set first, then rewrite; only GHCR additionally gets its WWW-Authenticate rewritten to the local /ghcr-token
# ghcr.io
location ^~ /ghcr/ {
set $upstream_host "ghcr.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
# Strip the prefix
rewrite ^/ghcr(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
# Key step: hand tokens out via our own /ghcr-token so clients never hit ghcr.io/token directly (avoids 403s and connectivity problems)
proxy_hide_header WWW-Authenticate;
add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/ghcr-token",service="ghcr.io"' always;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# gcr.io
location ^~ /gcr/ {
set $upstream_host "gcr.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/gcr(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# registry.k8s.io
location ^~ /rk8s/ {
set $upstream_host "registry.k8s.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/rk8s(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# Legacy k8s.gcr.io -> registry.k8s.io
location ^~ /kgcr/ {
set $upstream_host "registry.k8s.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/kgcr(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# mcr.microsoft.com
location ^~ /mcr/ {
set $upstream_host "mcr.microsoft.com";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/mcr(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# nvcr.io
location ^~ /nvcr/ {
set $upstream_host "nvcr.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/nvcr(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# quay.io
location ^~ /quay/ {
set $upstream_host "quay.io";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/quay(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# docker.elastic.co
location ^~ /elastic/ {
set $upstream_host "docker.elastic.co";
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
rewrite ^/elastic(?<rest>/.*)$ $rest break;
slice 1m;
proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
# ---------- /_proxy/<host>/<path?query> -> object storage / CDN ----------
location ~ ^/_proxy/(?<h>[^/]+)(?<p>/.*)$ {
if ($h !~* ^(registry-1\.docker\.io|auth\.docker\.io|production\.cloudflare\.docker\.com|.*\.cloudflarestorage\.com|.*\.r2\.cloudflarestorage\.com|.*\.amazonaws\.com|storage\.googleapis\.com|.*\.googleapis\.com|.*\.pkg\.dev|ghcr\.io|github\.com|pkg-containers\.[^/]*githubusercontent\.com|objects\.githubusercontent\.com|.*\.blob\.core\.windows\.net|.*\.azureedge\.net|mcr\.microsoft\.com|.*\.microsoft\.com|quay\.io|cdn\.quay\.io|.*quay-cdn[^/]*\.redhat\.com|k8s\.gcr\.io|registry\.k8s\.io|gcr\.io|docker\.elastic\.co|.*\.elastic\.co|.*\.cloudfront\.net|.*\.fastly\.net)$) {
return 403;
}
set $upstream_host $h;
# Strip the '/_proxy/<host>' prefix
rewrite ^/_proxy/[^/]+(?<rest>/.*)$ $rest break;
# Correct Host and SNI
proxy_set_header Host $upstream_host;
proxy_ssl_server_name on;
proxy_ssl_name $upstream_host;
proxy_ssl_protocols TLSv1.2 TLSv1.3;
proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
proxy_ssl_verify on;
proxy_ssl_verify_depth 2;
# Pass through only the client's Range header
proxy_set_header Range $http_range;
# Do not cache presigned URLs; do not buffer
proxy_redirect off;
proxy_cache off;
proxy_buffering off;
proxy_request_buffering off;
# Never forward any Authorization header upstream
proxy_set_header Authorization "";
# proxy_pass without a URI part (forwards the rewritten path as-is)
proxy_pass https://$upstream_host;
access_log /var/log/nginx/docker_mirror_access.log;
error_log /var/log/nginx/docker_mirror_error.log warn;
}
location = /healthz { return 200 'ok'; add_header Content-Type text/plain; }
}
# HTTP -> HTTPS
server {
listen 80;
server_name xing.axzys.cn;
return 301 https://$host$request_uri;
}
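With the two server blocks saved, one plausible way to enable and smoke-test the site (assuming the Debian-style sites-available/sites-enabled layout implied by the path comment at the top):
# Create the cache directory and enable the site
sudo mkdir -p /var/cache/nginx/docker
sudo ln -s /etc/nginx/sites-available/docker-mirror /etc/nginx/sites-enabled/
# Validate the config and reload
sudo nginx -t && sudo systemctl reload nginx
# The health endpoint should answer 200 "ok"
curl -s https://xing.axzys.cn/healthz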
2. Configure the Kubernetes client (containerd)
vi /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
endpoint = ["https://k8s-registry.local"]
# Full configuration
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
drain_exec_sync_io_timeout = "0s"
enable_cdi = false
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_deprecation_warnings = []
ignore_image_defined_volumes = false
image_pull_progress_timeout = "5m0s"
image_pull_with_sync_fs = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.cn-guangzhou.aliyuncs.com/xingcangku/eeeee:3.8"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
setup_serially = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_blockio_not_enabled_errors = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
sandbox_mode = "podsandbox"
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
#[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
endpoint = ["https://15.164.211.114"]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.nri.v1.nri"]
disable = true
disable_connections = false
plugin_config_path = "/etc/containerd/certs.d"
plugin_path = "/opt/nri/plugins"
plugin_registration_timeout = "5s"
plugin_request_timeout = "2s"
socket_path = "/var/run/nri/nri.sock"
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
sync_fs = false
[plugins."io.containerd.service.v1.tasks-service"]
blockio_config_file = ""
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.blockfile"]
fs_type = ""
mount_options = []
root_path = ""
scratch_file = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
mount_options = []
root_path = ""
sync_remove = false
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
[plugins."io.containerd.transfer.v1.local"]
config_path = "/etc/containerd/certs.d"
max_concurrent_downloads = 3
max_concurrent_uploaded_layers = 3
[[plugins."io.containerd.transfer.v1.local".unpack_config]]
differ = ""
platform = "linux/amd64"
snapshotter = "overlayfs"
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.metrics.shimstats" = "2s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
sudo mkdir -p /etc/containerd/certs.d/{docker.io,ghcr.io,gcr.io,registry.k8s.io,k8s.gcr.io,mcr.microsoft.com,nvcr.io,quay.io,docker.elastic.co}
root@k8s-03:/etc/containerd/certs.d# cat /etc/containerd/certs.d/ghcr.io/hosts.toml
server = "https://ghcr.io"
[host."https://xing.axzys.cn/ghcr/v2"]
capabilities = ["pull", "resolve"]
override_path = true
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://registry-1.docker.io"
[host."https://xing.axzys.cn"]
capabilities = ["pull", "resolve"]
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/k8s.gcr.io/hosts.toml
server = "https://k8s.gcr.io"
[host."https://xing.axzys.cn/kgcr/2"]
capabilities = ["pull", "resolve"]
override_path = true
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/registry.k8s.io/hosts.toml
server = "https://registry.k8s.io"
[host."https://xing.axzys.cn/rk8s/v2"]
capabilities = ["pull", "resolve"]
override_path = true
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/registry-1.docker.io/hosts.toml
server = "https://registry-1.docker.io"
[host."https://xing.axzys.cn"]
capabilities = ["pull", "resolve"]
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/quay.io/hosts.toml
server = "https://quay.io"
[host."https://xing.axzys.cn/quay/v2"]
capabilities = ["pull", "resolve"]
override_path = true
skip_verify = false
root@k8s-03:~# cat /etc/containerd/certs.d/docker.elastic.co/hosts.toml
server = "https://docker.elastic.co"
[host."https://xing.axzys.cn/elastic/2"]
capabilities = ["pull", "resolve"]
override_path = true
skip_verify = false
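The per-registry hosts.toml files above are repetitive; a sketch that generates them from a prefix map (assuming bash 4+ for associative arrays; docker.io is special-cased because the mirror serves it at the root with no prefix and no override_path):
#!/usr/bin/env bash
set -euo pipefail
declare -A PREFIX=(
  [ghcr.io]=ghcr [gcr.io]=gcr [registry.k8s.io]=rk8s [k8s.gcr.io]=kgcr
  [mcr.microsoft.com]=mcr [nvcr.io]=nvcr [quay.io]=quay [docker.elastic.co]=elastic
)
for reg in "${!PREFIX[@]}"; do
  dir=/etc/containerd/certs.d/$reg
  sudo mkdir -p "$dir"
  sudo tee "$dir/hosts.toml" >/dev/null <<EOF
server = "https://$reg"

[host."https://xing.axzys.cn/${PREFIX[$reg]}/v2"]
  capabilities = ["pull", "resolve"]
  override_path = true
  skip_verify = false
EOF
done
# docker.io: the mirror root already speaks /v2
sudo mkdir -p /etc/containerd/certs.d/docker.io
sudo tee /etc/containerd/certs.d/docker.io/hosts.toml >/dev/null <<EOF
server = "https://registry-1.docker.io"

[host."https://xing.axzys.cn"]
  capabilities = ["pull", "resolve"]
  skip_verify = false
EOF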
# Restart containerd for the changes to take effect
sudo systemctl restart containerd
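To confirm the merged configuration took effect (containerd config dump prints the final config; the pause tag below is just an example assumed to exist upstream):
sudo systemctl is-active containerd
# The merged config should point at the certs.d directory
sudo containerd config dump | grep -n 'config_path'
# Optional smoke test through the registry.k8s.io mirror entry
sudo nerdctl -n k8s.io --debug pull registry.k8s.io/pause:3.9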
3. Test the pull
# Pull a test image through the mirror
root@k8s-03:/etc/containerd/certs.d# sudo nerdctl -n k8s.io --debug pull docker.io/library/alpine:3.15
DEBU[0000] verifying process skipped
DEBU[0000] The image will be unpacked for platform {"amd64" "linux" "" [] ""}, snapshotter "overlayfs".
DEBU[0000] fetching image="docker.io/library/alpine:3.15"
DEBU[0000] loading host directory dir=/etc/containerd/certs.d/docker.io
DEBU[0000] resolving host=xing.axzys.cn
DEBU[0000] do request host=xing.axzys.cn request.header.accept="application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*" request.header.user-agent=containerd/2.1.1+unknown request.method=HEAD url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io"
docker.io/library/alpine:3.15: resolving |--------------------------------------|
elapsed: 0.9 s total: 0.0 B (0.0 B/s)
DEBU[0001] fetch response received host=xing.axzys.cn response.header.connection=keep-alive response.header.content-length=157 response.header.content-type=application/json response.header.date="Sat, 23 Aug 2025 16:41:57 GMT" response.header.docker-distribution-api-version=registry/2.0 response.header.docker-ratelimit-source=15.164.211.114 response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="401 Unauthorized" url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io"
DEBU[0001] Unauthorized header="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" host=xing.axzys.cn
DEBU[0001] no scope specified for token auth challenge host=xing.axzys.cn
DEBU[0001] do request host=xing.axzys.cn request.header.accept="application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, applicatio
docker.io/library/alpine:3.15: resolving |--------------------------------------|
elapsed: 3.3 s total: 0.0 B (0.0 B/s)
DEBU[0003] fetch response received host=xing.axzys.cn response.header.connection=keep-alive response.header.content-length=1638 response.header.content-type=application/vnd.docker.distribution.manifest.list.v2+json response.header.date="Sat, 23 Aug 2025 16:42:00 GMT" response.header.docker-content-digest="sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864" response.header.docker-distribution-api-version=registry/2.0 response.header.docker-ratelimit-source=15.164.211.114 response.header.etag="\"sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864\"" response.header.ratelimit-limit="100;w=21600" response.header.ratelimit-remaining="92;w=21600" response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="200 OK" url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io"
DEBU[0003] resolved desc.digest="sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864" host=xing.axzys.cn
DEBU[0003] loading host directory dir=/etc/containerd/certs.d/docker.io
docker.io/library/alpine:3.15: resolving |--------------------------------------|
elapsed: 3.4 s total: 0.0 B (0.0 B/s)
DEBU[0003] fetch digest="sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5" mediatype=application/vnd.docker.distribution.manifest.v2+json size=528
DEBU[0003] fetch digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json size=1472
DEBU[0003] fetching layer chunk_size=0 digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" initial_parallelism=0 mediatype=application/vnd.docker.container.image.v1+json offset=0 parallelism=1 size=1472
DEBU[0003] do request digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.accept-encoding="zstd;q=1.0, gzip;q=0.8, deflate;q=0.5" request.header.range="bytes=0-" request.header.user-agent
docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: downloading |--------------------------------------| 0.0 B/1.4 KiB
elapsed: 4.4 s total: 0.0 B (0.0 B/s)
DEBU[0004] fetch response received digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json response.header.connection=keep-alive response.header.content-length=157 response.header.content-type=application/json response.header.date="Sat, 23 Aug 2025 16:42:01 GMT" response.header.docker-distribution-api-version=registry/2.0 response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="401 Unauthorized" size=1472 url="https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io"
docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: downloading |--------------------------------------| 0.0 B/1.4 KiB
elapsed: 8.2 s total: 0.0 B (0.0 B/s)
DEBU[0008] fetch response received digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json response.header.accept-ranges=bytes response.header.cf-ray=973c0fa05a2930d3-ICN response.header.connection=keep-alive response.header.content-length=1472 response.header.content-range="bytes 0-1471/1472" response.header.content-type=application/octet-stream response.header.date="Sat, 23 Aug 2025 16:42:04 GMT" response.header.etag="\"aa36606459d6778a94123c7d6a33396b\"" response.header.last-modified="Fri, 13 Dec 2024 15:03:06 GMT" response.header.server=nginx response.header.vary=Accept-Encoding response.header.x-cache-status=BYPASS response.status="206 Partial Content" size=1472 url="https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io"
docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: done |+++++++++++++++++++++++++++++++++++
docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++|
docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++|
index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++|
manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++|
config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: done |++++++++++++++++++++++++++++++++++++++|
layer-sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108: done |++++++++++++++++++++++++++++++++++++++|
elapsed: 86.3s total: 2.7 Mi (32.0 KiB/s)
# Pull driven by a Kubernetes YAML manifest; resulting pod events:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Pulled 7m2s (x6 over 5h7m) kubelet Container image "docker.io/library/alpine:3.9" already present on machine
Normal Created 7m2s (x6 over 5h7m) kubelet Created container test-container
Normal Started 7m1s (x6 over 5h7m) kubelet Started container test-container
# Nginx access-log entries for the pull
223.74.152.108 - - [23/Aug/2025:16:41:57 +0000] "HEAD /v2/library/alpine/manifests/3.15?ns=docker.io HTTP/1.1" 401 0 "-" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:42:00 +0000] "HEAD /v2/library/alpine/manifests/3.15?ns=docker.io HTTP/1.1" 200 0 "-" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:42:01 +0000] "GET /v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io HTTP/1.1" 401 157 "-" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:42:04 +0000] "GET /v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io HTTP/1.1" 307 0 "-" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:42:04 +0000] "GET /_proxy/docker-images-prod.6aa30f8b08e16409b46e0173d6de2f56.r2.cloudflarestorage.com/registry-v2/docker/registry/v2/blobs/sha256/32/32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=f1baa2dd9b876aeb89efebbfc9e5d5f4%2F20250823%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250823T164203Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=860ec74942b8c48e9922b561b9ef4cfd409dc4acf22daa9a31a45754aff6d32a HTTP/1.1" 206 1472 "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:42:05 +0000] "GET /v2/library/alpine/blobs/sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108?ns=docker.io HTTP/1.1" 307 0 "-" "containerd/2.1.1+unknown"
223.74.152.108 - - [23/Aug/2025:16:43:21 +0000] "GET /_proxy/docker-images-prod.6aa30f8b08e16409b46e0173d6de2f56.r2.cloudflarestorage.com/registry-v2/docker/registry/v2/blobs/sha256/d0/d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=f1baa2dd9b876aeb89efebbfc9e5d5f4%2F20250823%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250823T164205Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=118da1e073a4589f6a14cb751acfbfdb0c7431fa55703f24d5278e7ec26246a3 HTTP/1.1" 206 2826431 "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108?ns=docker.io" "containerd/2.1.1+unknown"
4. Dissecting the flow and the Nginx config
4.1 Actors and goals
- Client: nerdctl/containerd
- Mirror: the NGINX at xing.axzys.cn (this very config)
- Docker Hub: registry-1.docker.io (the registry API) plus auth.docker.io (token issuance)
- Object storage/CDN behind Docker Hub: Cloudflare R2 and friends (this pull hit *.r2.cloudflarestorage.com)
Goal: every client request lands on your domain. NGINX centrally handles authentication, rewrites 3xx responses, and caches what is cacheable under /v2/; when the upstream redirects large blobs to object storage, the client stays on the same domain (via /_proxy/... on your host) instead of making direct external connections that may be restricted or unreachable.
4.2 Reconstructing the whole chain along the timeline
All timestamps come from the two logs you pasted (the nginx access log and nerdctl --debug); they corroborate each other.
4.2.1 Preparation (why resolver and SNI are needed)
In the http block you have:
resolver 1.1.1.1 8.8.8.8 ...;
Because upstream hostnames appear as variables ($upstream_host) throughout, Nginx must resolve DNS at request time rather than once at config load.
In both /v2/ and /_proxy/ you enable:
proxy_ssl_server_name on; proxy_ssl_name $upstream_host;
This way the TLS handshake with the upstream carries the real target hostname in SNI, and certificate verification can succeed.
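You can see what a correct SNI handshake looks like with openssl (a quick side check, not part of the mirror itself):
# Present the right SNI and print the certificate the upstream serves
openssl s_client -connect registry-1.docker.io:443 \
  -servername registry-1.docker.io </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer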
4.2.2 HEAD on the manifest triggers authentication (16:41:57 → 401)
HEAD /v2/library/alpine/manifests/3.15?ns=docker.io → 401
WWW-Authenticate: Bearer realm="https://xing.axzys.cn/token", service="registry.docker.io"
Who returned the 401? Your NGINX, not Docker Hub.
Why? Because in /v2/ you have:
proxy_hide_header WWW-Authenticate;
add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always;
This forcibly steers authentication to your own /token endpoint, pinning the token-fetch traffic to your domain (convenient for egress control and auditing).
Client behavior: on receiving the 401 plus WWW-Authenticate, containerd issues GET https://xing.axzys.cn/token?... to obtain a Bearer token (a JWT).
The access-log excerpt above does not show the /token line, but the subsequent behavior proves it succeeded.
The nerdctl debug output contains
Unauthorized ... no scope specified for token auth challenge
This is just the usual notice when containerd starts the token flow: the first 401 supplies only realm/service; when requesting concrete resources it adds parameters such as scope=repository:library/alpine:pull to exchange for a genuinely usable token.
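The same three-step dance can be replayed by hand with curl (a sketch; jq is assumed to be installed):
# 1) Anonymous probe: expect 401 with the rewritten WWW-Authenticate
curl -sI https://xing.axzys.cn/v2/ | grep -i www-authenticate
# 2) Obtain a token through the mirror (relayed to auth.docker.io)
TOKEN=$(curl -s "https://xing.axzys.cn/token?service=registry.docker.io&scope=repository:library/alpine:pull" | jq -r .token)
# 3) Retry with the token: expect 200 plus a Docker-Content-Digest header
curl -sI -H "Authorization: Bearer $TOKEN" \
  -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" \
  https://xing.axzys.cn/v2/library/alpine/manifests/3.15 | head -5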
4.2.3 Fetching the config blob (16:42:01 → 401 → 307)
GET /v2/.../blobs/sha256:32b91e... → 401
...随后同 URL → 307 Location: https://...r2.cloudflarestorage.com/...
- The first 401: typical of a token refresh or scope switch; containerd transparently exchanges for a new token (or simply retries), and the 307 follows immediately.
- The 307 from Hub/CDN: Docker Hub does not serve the actual binary layers (the config blob included) itself; it hands out a presigned URL (Cloudflare R2/S3/GCS, etc.).
Your /v2/ block contains:
proxy_redirect ~^https://(?<h>[^/]+)(?<p>/.*)$ https://$server_name/_proxy/$h$p;
# This rewrites any upstream 30x Location into /_proxy/<original-host>/<original-path?query> on your domain, so the client keeps talking to you and never connects to R2 directly.
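To watch the rewrite happen, request a blob without following redirects (reusing $TOKEN from the sketch in 4.2.2; the digest is the config blob from the logs above):
curl -s -o /dev/null -D - -H "Authorization: Bearer $TOKEN" \
  "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" \
  | grep -i '^location'
# Expected shape: location: https://xing.axzys.cn/_proxy/<storage-host>/<path>?X-Amz-...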
4.2.4 Reaching object storage via /_proxy (16:42:04 → 206)
GET /_proxy/docker-images-prod....r2.cloudflarestorage.com/... → 206 Partial Content
Content-Range: bytes 0-1471/1472
X-Cache-Status: BYPASS
This hits your location ~ ^/_proxy/... block:
- Strict host allowlisting: anything not on the list gets 403 (you already cover R2/S3/GCS/Quay CDN/Azure/Microsoft/Elastic/CloudFront/Fastly, and so on).
- TLS/SNI aligned with the real upstream host (proxy_ssl_name $upstream_host; proxy_ssl_verify on;).
- No caching (proxy_cache off;), no buffering (proxy_buffering off;), and no forwarded Authorization (proxy_set_header Authorization ""; as a precaution).
- Only the client's Range is passed through (proxy_set_header Range $http_range;) — clients most often send Range: bytes=0-, so the upstream answers 206 Partial Content.
This particular object is the config blob (1472 bytes), fetched in one shot (Content-Range: 0-1471/1472).
The nerdctl debug output also shows:
cf-ray=...-ICN — a Cloudflare PoP identifier; ICN usually means the Incheon/Seoul edge, i.e. you are close to an R2 edge node, though throughput still depends on upstream throttling and the cross-network path.
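Following the redirect with a Range header reproduces the 206 via /_proxy (again reusing $TOKEN; -L keeps curl on your domain because the Location was rewritten):
curl -s -o /dev/null -D - -L \
  -H "Authorization: Bearer $TOKEN" -H "Range: bytes=0-1471" \
  "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" \
  | grep -iE 'HTTP/|content-range|x-cache-status'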
4.2.5 Fetching the large layer blob (16:42:05 → 307; 16:43:21 → 206)
GET /v2/.../blobs/sha256:d07879... → 307 Location: https://...r2.cloudflarestorage.com/...
GET /_proxy/...r2.cloudflarestorage.com/... → 206 2,826,431 bytes
The flow is identical to 4.2.3/4.2.4, except this blob is a genuinely large layer.
The access log records 206 2826431, i.e. ~2.70 MiB; the whole pull totals 2.7 MiB in 86.3 s (~32 KiB/s), exactly matching the final line of the debug output.
4.3 Why these NGINX directives matter
4.3.1 Auth steering (pulling the token flow onto your domain)
In /v2/: proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always;
In /token: proxy_pass https://auth.docker.io/token$is_args$args; proxy_set_header Host auth.docker.io; ...; proxy_cache off; proxy_buffering off;
# This guarantees clients always ask you for tokens and you relay to auth.docker.io, so tokens remain obtainable even when direct egress is unreliable.
4.3.2 Rewriting redirects onto the same-domain /_proxy
In /v2/: proxy_redirect uses a regex to rewrite any https://<host>/<path> into https://xing.axzys.cn/_proxy/<host>/<path>.
# Clients only ever talk to your domain (layer downloads included) and never connect to R2/S3/GCS directly — the crux of the accelerator and of unified egress.
4.3.3 Security and pass-through policy for /_proxy
- Allowlist: only object-storage/official domains are permitted; every other domain gets 403 (anti-SSRF, anti-phishing).
- Strict TLS/SNI: SNI and certificate verification must match the upstream domain exactly.
- No caching, no buffering, credentials stripped: presigned URLs are time-limited and scoped, so they must not be cached, and no sensitive header should be forwarded.
- Range passthrough only: let the upstream answer Range requests with 206 directly, maximizing compatibility with clients' resume and parallel-download strategies.
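A quick negative test of the allowlist (evil.example.com is a placeholder for any host not on the list; expect 403):
curl -s -o /dev/null -w '%{http_code}\n' \
  "https://xing.axzys.cn/_proxy/evil.example.com/anything"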
4.3.4 Caching and slicing (for /v2/ only)
slice 1m;
proxy_cache docker_cache;
proxy_cache_key ... $slice_range;
proxy_cache_valid 200 206 302 10m;
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
proxy_cache_lock on;
These optimizations pay off when the upstream answers 200/206 directly from /v2/ (many private registries do).
With Docker Hub, however, large layers are always 30x-redirected to object storage, so the real bytes flow through /_proxy (where caching is off), not /v2/. Therefore:
- the sliced cache on /v2/ mainly benefits manifests (200) and whatever 302s/small objects the upstream returns;
- large layer data never enters the cache (/_proxy disables caching, and the URLs are presigned with expiry). This is a deliberate and correct choice: caching an expired signature would produce 403s.
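One caveat when verifying cache hits: nginx add_header directives are not inherited once a location defines its own, so /v2/ (which adds WWW-Authenticate) does not emit the server-level X-Cache-Status. If you repeat add_header X-Cache-Status $cache_status always; inside the /v2/ location, a double fetch should show MISS then HIT ($TOKEN as in 4.2.2):
for i in 1 2; do
  curl -s -o /dev/null -D - -H "Authorization: Bearer $TOKEN" \
    "https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" \
    | grep -i x-cache-status
done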
4.4 What the key headers in your logs mean
- Docker-RateLimit-Source: 15.164.211.114 — Hub attributes pull counts to your server's egress IP; every internal client shares this one anonymous quota.
- RateLimit-Limit: 100;w=21600 / RateLimit-Remaining: 92;w=21600 — the typical anonymous allowance (100 pulls per 6-hour window).
- Content-Range: bytes 0-1471/1472 and 206 Partial Content — ranged, resumable downloads; containerd/nerdctl pull this way by default.
- cf-ray=...-ICN — Cloudflare PoP (ICN = Incheon/Seoul): the object was actually served from an edge node.
- X-Cache-Status: BYPASS — emitted by /_proxy; since you explicitly set proxy_cache off there, BYPASS is the expected value.
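The remaining anonymous quota can be probed with Docker's documented ratelimitpreview/test image (HEAD requests do not consume pulls; going through the mirror attributes the count to the mirror's egress IP):
TOKEN=$(curl -s "https://xing.axzys.cn/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token)
curl -sI -H "Authorization: Bearer $TOKEN" \
  https://xing.axzys.cn/v2/ratelimitpreview/test/manifests/latest | grep -i ratelimit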
4.5 Why the whole pull took 86.3 s (~32 KiB/s)
- Upstream throttling of anonymous large-blob downloads (per-connection/per-IP limits on the CDN side) plus public-egress link quality are usually the dominant factors.
- Your /_proxy correctly disables caching, so expired or mis-scoped presigned URLs can never bite you, but it also means a first pull gains nothing from a local cache.
- A second pull of the same layer will not hit /_proxy either (each URL carries a fresh, expiring signature); manifests and 302s, however, are cached for 10 minutes under /v2/, saving the bootstrap round-trips.
4.6 Minimal timeline (both logs merged)
16:41:57 HEAD manifests → 401 (your /v2/ deliberately steers the client to /token)
16:42:00 HEAD manifests (with token) → 200 (manifest-list digest obtained)
16:42:01 GET config blob → 401 (token/scope check)
16:42:04 GET same config blob → 307 (Hub points at R2)
16:42:04 GET via /_proxy to R2 → 206 (1472 B, config done)
16:42:05 GET layer blob → 307 (redirect to R2)
16:43:21 GET via /_proxy to R2 → 206 (2.7 MiB, large layer done)
Total: 2.7 MiB in 86.3 s ≈ 32 KiB/s (bottleneck mostly upstream throttling and the public link)