Found 115 matching posts.
2025-08-21
k8s image pull acceleration (self-hosted registry mirror)
一、安装配置nginx1.需要准备一个可以访问外网的服务器 2.安装nginx 3.准备域名解析到服务器,然后把证书配置到nginx里面 # /etc/nginx/sites-available/docker-mirror # DNS for variable proxy_pass resolver 1.1.1.1 8.8.8.8 valid=300s ipv6=off; # Cache (only used under /v2/) proxy_cache_path /var/cache/nginx/docker levels=1:2 keys_zone=docker_cache:50m max_size=300g inactive=7d use_temp_path=off; # Registry v2 header map $http_docker_distribution_api_version $docker_api_version { default "registry/2.0"; } # expose cache status map $upstream_cache_status $cache_status { default $upstream_cache_status; "" "BYPASS"; } server { listen 443 ssl http2; # listen 443 ssl http2 default_server; server_name xing.axzys.cn; ssl_certificate /etc/nginx/ssl/xing.axzys.cn.pem; ssl_certificate_key /etc/nginx/ssl/xing.axzys.cn.key; client_max_body_size 0; proxy_http_version 1.1; proxy_connect_timeout 60s; proxy_read_timeout 600s; proxy_send_timeout 600s; # 默认流式 proxy_buffering off; proxy_request_buffering off; proxy_set_header Connection ""; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Docker-Distribution-Api-Version $docker_api_version; # 全局打开缓存(/_proxy、/token 会单独关闭) proxy_cache docker_cache; proxy_cache_lock on; proxy_cache_revalidate on; proxy_cache_min_uses 1; proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; proxy_cache_valid 200 206 302 10m; add_header X-Cache-Status $cache_status always; # 把上游 3xx Location 改写到 /_proxy/<host>/<path?query> proxy_redirect ~^https://(?<h>[^/]+)(?<p>/.*)$ https://$server_name/_proxy/$h$p; # ---------- token endpoint(Docker Hub 专用) ---------- location = /token { proxy_pass https://auth.docker.io/token$is_args$args; proxy_set_header Host auth.docker.io; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Authorization ""; proxy_cache off; proxy_buffering off; proxy_http_version 1.1; proxy_connect_timeout 30s; proxy_read_timeout 30s; proxy_send_timeout 30s; } # ---------- GHCR token 代领 ---------- location = /ghcr-token { proxy_pass https://ghcr.io/token$is_args$args; proxy_set_header Host ghcr.io; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Authorization ""; proxy_cache off; proxy_buffering off; proxy_http_version 1.1; proxy_connect_timeout 30s; proxy_read_timeout 30s; proxy_send_timeout 30s; } # ---------- /v2/ -> Docker Hub ---------- location ^~ /v2/ { set $upstream_host "registry-1.docker.io"; proxy_set_header Host $upstream_host; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; # 引导客户端去我们的 /token proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always; proxy_buffering off; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # ================= 其余注册中心(带前缀)================= # 先 set 再 rewrite;必要时仅对 GHCR 改写 WWW-Authenticate 到本地 /ghcr-token # ghcr.io location ^~ /ghcr/ { set $upstream_host "ghcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; # 去掉前缀 rewrite ^/ghcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key 
$scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; # 关键:把令牌下发到你自己的 /ghcr-token,避免客户端直连 ghcr.io/token 403/网络问题 proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/ghcr-token",service="ghcr.io"' always; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # gcr.io location ^~ /gcr/ { set $upstream_host "gcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/gcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # registry.k8s.io location ^~ /rk8s/ { set $upstream_host "registry.k8s.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/rk8s(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # 兼容 k8s.gcr.io -> registry.k8s.io location ^~ /kgcr/ { set $upstream_host "registry.k8s.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/kgcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # mcr.microsoft.com location ^~ /mcr/ { set $upstream_host "mcr.microsoft.com"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/mcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # nvcr.io location ^~ /nvcr/ { set $upstream_host "nvcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/nvcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # quay.io location ^~ /quay/ { set $upstream_host "quay.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/quay(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # docker.elastic.co location ^~ /elastic/ { set $upstream_host "docker.elastic.co"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/elastic(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key 
$scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # ---------- /_proxy/<host>/<path?query> -> 对象存储/CDN ---------- location ~ ^/_proxy/(?<h>[^/]+)(?<p>/.*)$ { if ($h !~* ^(registry-1\.docker\.io|auth\.docker\.io|production\.cloudflare\.docker\.com|.*\.cloudflarestorage\.com|.*\.r2\.cloudflarestorage\.com|.*\.amazonaws\.com|storage\.googleapis\.com|.*\.googleapis\.com|.*\.pkg\.dev|ghcr\.io|github\.com|pkg-containers\.[^/]*githubusercontent\.com|objects\.githubusercontent\.com|.*\.blob\.core\.windows\.net|.*\.azureedge\.net|mcr\.microsoft\.com|.*\.microsoft\.com|quay\.io|cdn\.quay\.io|.*quay-cdn[^/]*\.redhat\.com|k8s\.gcr\.io|registry\.k8s\.io|gcr\.io|docker\.elastic\.co|.*\.elastic\.co|.*\.cloudfront\.net|.*\.fastly\.net)$) { return 403; } set $upstream_host $h; # 去掉 '/_proxy/<host>' 前缀 rewrite ^/_proxy/[^/]+(?<rest>/.*)$ $rest break; # 正确 Host 与 SNI proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; proxy_ssl_protocols TLSv1.2 TLSv1.3; proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; proxy_ssl_verify on; proxy_ssl_verify_depth 2; # 只透传客户端 Range proxy_set_header Range $http_range; # 不缓存预签名 URL;不缓冲 proxy_redirect off; proxy_cache off; proxy_buffering off; proxy_request_buffering off; # 避免把任何 Authorization 透传 proxy_set_header Authorization ""; # 不带 URI 的 proxy_pass proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } location = /healthz { return 200 'ok'; add_header Content-Type text/plain; } } # HTTP -> HTTPS server { listen 80; server_name xing.axzys.cn; return 301 https://$host$request_uri; } 二、配置k8s客户端vi /etc/containerd/config.toml[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"] endpoint = ["https://k8s-registry.local"]#完整配置 disabled_plugins = [] imports = [] oom_score = 0 plugin_dir = "" required_plugins = [] root = "/var/lib/containerd" state = "/run/containerd" temp = "" version = 2 [cgroup] path = "" [debug] address = "" format = "" gid = 0 level = "" uid = 0 [grpc] address = "/run/containerd/containerd.sock" gid = 0 max_recv_message_size = 16777216 max_send_message_size = 16777216 tcp_address = "" tcp_tls_ca = "" tcp_tls_cert = "" tcp_tls_key = "" uid = 0 [metrics] address = "" grpc_histogram = false [plugins] [plugins."io.containerd.gc.v1.scheduler"] deletion_threshold = 0 mutation_threshold = 100 pause_threshold = 0.02 schedule_delay = "0s" startup_delay = "100ms" [plugins."io.containerd.grpc.v1.cri"] cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] device_ownership_from_security_context = false disable_apparmor = false disable_cgroup = false disable_hugetlb_controller = true disable_proc_mount = false disable_tcp_service = true drain_exec_sync_io_timeout = "0s" enable_cdi = false enable_selinux = false enable_tls_streaming = false enable_unprivileged_icmp = false enable_unprivileged_ports = false ignore_deprecation_warnings = [] ignore_image_defined_volumes = false image_pull_progress_timeout = "5m0s" image_pull_with_sync_fs = false max_concurrent_downloads = 3 max_container_log_line_size = 16384 netns_mounts_under_state_dir = false restrict_oom_score_adj = false sandbox_image = "registry.cn-guangzhou.aliyuncs.com/xingcangku/eeeee:3.8" selinux_category_range = 1024 
stats_collect_period = 10 stream_idle_timeout = "4h0m0s" stream_server_address = "127.0.0.1" stream_server_port = "0" systemd_cgroup = false tolerate_missing_hugetlb_controller = true unset_seccomp_profile = "" [plugins."io.containerd.grpc.v1.cri".cni] bin_dir = "/opt/cni/bin" conf_dir = "/etc/cni/net.d" conf_template = "" ip_pref = "" max_conf_num = 1 setup_serially = false [plugins."io.containerd.grpc.v1.cri".containerd] default_runtime_name = "runc" disable_snapshot_annotations = true discard_unpacked_layers = false ignore_blockio_not_enabled_errors = false ignore_rdt_not_enabled_errors = false no_pivot = false snapshotter = "overlayfs" [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] base_runtime_spec = "" cni_conf_dir = "" cni_max_conf_num = 0 container_annotations = [] pod_annotations = [] privileged_without_host_devices = false privileged_without_host_devices_all_devices_allowed = false runtime_engine = "" runtime_path = "" runtime_root = "" runtime_type = "" sandbox_mode = "" snapshotter = "" [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] base_runtime_spec = "" cni_conf_dir = "" cni_max_conf_num = 0 container_annotations = [] pod_annotations = [] privileged_without_host_devices = false privileged_without_host_devices_all_devices_allowed = false runtime_engine = "" runtime_path = "" runtime_root = "" runtime_type = "io.containerd.runc.v2" sandbox_mode = "podsandbox" snapshotter = "" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] BinaryName = "" CriuImagePath = "" CriuPath = "" CriuWorkPath = "" IoGid = 0 IoUid = 0 NoNewKeyring = false NoPivotRoot = false Root = "" ShimCgroup = "" SystemdCgroup = true [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] base_runtime_spec = "" cni_conf_dir = "" cni_max_conf_num = 0 container_annotations = [] pod_annotations = [] privileged_without_host_devices = false privileged_without_host_devices_all_devices_allowed = false runtime_engine = "" runtime_path = "" runtime_root = "" runtime_type = "" sandbox_mode = "" snapshotter = "" [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] [plugins."io.containerd.grpc.v1.cri".image_decryption] key_model = "node" [plugins."io.containerd.grpc.v1.cri".registry] config_path = "/etc/containerd/certs.d" [plugins."io.containerd.grpc.v1.cri".registry.auths] [plugins."io.containerd.grpc.v1.cri".registry.configs] [plugins."io.containerd.grpc.v1.cri".registry.headers] #[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"] endpoint = ["https://15.164.211.114"] [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] tls_cert_file = "" tls_key_file = "" [plugins."io.containerd.internal.v1.opt"] path = "/opt/containerd" [plugins."io.containerd.internal.v1.restart"] interval = "10s" [plugins."io.containerd.internal.v1.tracing"] [plugins."io.containerd.metadata.v1.bolt"] content_sharing_policy = "shared" [plugins."io.containerd.monitor.v1.cgroups"] no_prometheus = false [plugins."io.containerd.nri.v1.nri"] disable = true disable_connections = false plugin_config_path = "/etc/containerd/certs.d" plugin_path = "/opt/nri/plugins" plugin_registration_timeout = "5s" plugin_request_timeout = "2s" socket_path = "/var/run/nri/nri.sock" 
[plugins."io.containerd.runtime.v1.linux"] no_shim = false runtime = "runc" runtime_root = "" shim = "containerd-shim" shim_debug = false [plugins."io.containerd.runtime.v2.task"] platforms = ["linux/amd64"] sched_core = false [plugins."io.containerd.service.v1.diff-service"] default = ["walking"] sync_fs = false [plugins."io.containerd.service.v1.tasks-service"] blockio_config_file = "" rdt_config_file = "" [plugins."io.containerd.snapshotter.v1.aufs"] root_path = "" [plugins."io.containerd.snapshotter.v1.blockfile"] fs_type = "" mount_options = [] root_path = "" scratch_file = "" [plugins."io.containerd.snapshotter.v1.btrfs"] root_path = "" [plugins."io.containerd.snapshotter.v1.devmapper"] async_remove = false base_image_size = "" discard_blocks = false fs_options = "" fs_type = "" pool_name = "" root_path = "" [plugins."io.containerd.snapshotter.v1.native"] root_path = "" [plugins."io.containerd.snapshotter.v1.overlayfs"] mount_options = [] root_path = "" sync_remove = false upperdir_label = false [plugins."io.containerd.snapshotter.v1.zfs"] root_path = "" [plugins."io.containerd.tracing.processor.v1.otlp"] [plugins."io.containerd.transfer.v1.local"] config_path = "/etc/containerd/certs.d" max_concurrent_downloads = 3 max_concurrent_uploaded_layers = 3 [[plugins."io.containerd.transfer.v1.local".unpack_config]] differ = "" platform = "linux/amd64" snapshotter = "overlayfs" [proxy_plugins] [stream_processors] [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar" [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar+gzip" [timeouts] "io.containerd.timeout.bolt.open" = "0s" "io.containerd.timeout.metrics.shimstats" = "2s" "io.containerd.timeout.shim.cleanup" = "5s" "io.containerd.timeout.shim.load" = "5s" "io.containerd.timeout.shim.shutdown" = "3s" "io.containerd.timeout.task.state" = "2s" [ttrpc] address = "" gid = 0 uid = 0sudo mkdir -p /etc/containerd/certs.d/{docker.io,ghcr.io,gcr.io,registry.k8s.io,k8s.gcr.io,mcr.microsoft.com,nvcr.io,quay.io,docker.elastic.co}root@k8s-03:/etc/containerd/certs.d# cat /etc/containerd/certs.d/ghcr.io/hosts.toml server = "https://ghcr.io" [host."https://xing.axzys.cn/ghcr/v2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = falseroot@k8s-03:~# cat /etc/containerd/certs.d/docker.io/hosts.toml server = "https://registry-1.docker.io" [host."https://xing.axzys.cn"] capabilities = ["pull", "resolve"] skip_verify = false root@k8s-03:~# cat /etc/containerd/certs.d/k8s.gcr.io/hosts.toml server = "https://k8s.gcr.io" [host."https://xing.axzys.cn/kgcr/2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = false root@k8s-03:~# cat /etc/containerd/certs.d/registry.k8s.io/hosts.toml server = "https://registry.k8s.io" [host."https://xing.axzys.cn/rk8s/v2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = false root@k8s-03:~# cat /etc/containerd/certs.d/registry-1.docker.io/hosts.toml server = 
"https://registry-1.docker.io" [host."https://xing.axzys.cn"] capabilities = ["pull", "resolve"] skip_verify = false root@k8s-03:~# cat /etc/containerd/certs.d/quay.io/hosts.toml server = "https://quay.io" [host."https://xing.axzys.cn/quay/v2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = false root@k8s-03:~# cat /etc/containerd/certs.d/docker.elastic.co/hosts.toml server = "https://docker.elastic.co" [host."https://xing.axzys.cn/elastic/2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = falseroot@k8s-03:~# cat /etc/containerd/certs.d/ghcr.io/hosts.toml server = "https://ghcr.io" [host."https://xing.axzys.cn/ghcr/v2"] capabilities = ["pull", "resolve"] override_path = true skip_verify = false #重启containerd生效 sudo systemctl restart containerd三、测试拉取#测试拉取镜像 root@k8s-03:/etc/containerd/certs.d# sudo nerdctl -n k8s.io --debug pull docker.io/library/alpine:3.15 DEBU[0000] verifying process skipped DEBU[0000] The image will be unpacked for platform {"amd64" "linux" "" [] ""}, snapshotter "overlayfs". DEBU[0000] fetching image="docker.io/library/alpine:3.15" DEBU[0000] loading host directory dir=/etc/containerd/certs.d/docker.io DEBU[0000] resolving host=xing.axzys.cn DEBU[0000] do request host=xing.axzys.cn request.header.accept="application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, application/vnd.oci.image.index.v1+json, */*" request.header.user-agent=containerd/2.1.1+unknown request.method=HEAD url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" docker.io/library/alpine:3.15: resolving |--------------------------------------| elapsed: 0.9 s total: 0.0 B (0.0 B/s) DEBU[0001] fetch response received host=xing.axzys.cn response.header.connection=keep-alive response.header.content-length=157 response.header.content-type=application/json response.header.date="Sat, 23 Aug 2025 16:41:57 GMT" response.header.docker-distribution-api-version=registry/2.0 response.header.docker-ratelimit-source=15.164.211.114 response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="401 Unauthorized" url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" DEBU[0001] Unauthorized header="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" host=xing.axzys.cn DEBU[0001] no scope specified for token auth challenge host=xing.axzys.cn DEBU[0001] do request host=xing.axzys.cn request.header.accept="application/vnd.docker.distribution.manifest.v2+json, application/vnd.docker.distribution.manifest.list.v2+json, application/vnd.oci.image.manifest.v1+json, applicatio docker.io/library/alpine:3.15: resolving |--------------------------------------| elapsed: 3.3 s total: 0.0 B (0.0 B/s) DEBU[0003] fetch response received host=xing.axzys.cn response.header.connection=keep-alive response.header.content-length=1638 response.header.content-type=application/vnd.docker.distribution.manifest.list.v2+json response.header.date="Sat, 23 Aug 2025 16:42:00 GMT" response.header.docker-content-digest="sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864" response.header.docker-distribution-api-version=registry/2.0 response.header.docker-ratelimit-source=15.164.211.114 
response.header.etag="\"sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864\"" response.header.ratelimit-limit="100;w=21600" response.header.ratelimit-remaining="92;w=21600" response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="200 OK" url="https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" DEBU[0003] resolved desc.digest="sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864" host=xing.axzys.cn DEBU[0003] loading host directory dir=/etc/containerd/certs.d/docker.io docker.io/library/alpine:3.15: resolving |--------------------------------------| elapsed: 3.4 s total: 0.0 B (0.0 B/s) DEBU[0003] fetch digest="sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5" mediatype=application/vnd.docker.distribution.manifest.v2+json size=528 DEBU[0003] fetch digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json size=1472 DEBU[0003] fetching layer chunk_size=0 digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" initial_parallelism=0 mediatype=application/vnd.docker.container.image.v1+json offset=0 parallelism=1 size=1472 DEBU[0003] do request digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.accept-encoding="zstd;q=1.0, gzip;q=0.8, deflate;q=0.5" request.header.range="bytes=0-" request.header.user-agent docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++| index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++| manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++| config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: downloading |--------------------------------------| 0.0 B/1.4 KiB elapsed: 4.4 s total: 0.0 B (0.0 B/s) DEBU[0004] fetch response received digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json response.header.connection=keep-alive response.header.content-length=157 response.header.content-type=application/json response.header.date="Sat, 23 Aug 2025 16:42:01 GMT" response.header.docker-distribution-api-version=registry/2.0 response.header.server=nginx response.header.strict-transport-security="max-age=31536000" response.header.www-authenticate="Bearer realm=\"https://xing.axzys.cn/token\",service=\"registry.docker.io\"" response.status="401 Unauthorized" size=1472 url="https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io" docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++| index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++| manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++| config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: downloading |--------------------------------------| 0.0 B/1.4 KiB elapsed: 8.2 s 
total: 0.0 B (0.0 B/s) DEBU[0008] fetch response received digest="sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d" mediatype=application/vnd.docker.container.image.v1+json response.header.accept-ranges=bytes response.header.cf-ray=973c0fa05a2930d3-ICN response.header.connection=keep-alive response.header.content-length=1472 response.header.content-range="bytes 0-1471/1472" response.header.content-type=application/octet-stream response.header.date="Sat, 23 Aug 2025 16:42:04 GMT" response.header.etag="\"aa36606459d6778a94123c7d6a33396b\"" response.header.last-modified="Fri, 13 Dec 2024 15:03:06 GMT" response.header.server=nginx response.header.vary=Accept-Encoding response.header.x-cache-status=BYPASS response.status="206 Partial Content" size=1472 url="https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io" docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++| index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++| manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++| config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: done |+++++++++++++++++++++++++++++++++++ docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++| index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++| docker.io/library/alpine:3.15: resolved |++++++++++++++++++++++++++++++++++++++| index-sha256:19b4bcc4f60e99dd5ebdca0cbce22c503bbcff197549d7e19dab4f22254dc864: exists |++++++++++++++++++++++++++++++++++++++| manifest-sha256:6a0657acfef760bd9e293361c9b558e98e7d740ed0dffca823d17098a4ffddf5: exists |++++++++++++++++++++++++++++++++++++++| config-sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d: done |++++++++++++++++++++++++++++++++++++++| layer-sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108: done |++++++++++++++++++++++++++++++++++++++| elapsed: 86.3s total: 2.7 Mi (32.0 KiB/s) #使用k8syaml文件拉取 Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Pulled 7m2s (x6 over 5h7m) kubelet Container image "docker.io/library/alpine:3.9" already present on machine Normal Created 7m2s (x6 over 5h7m) kubelet Created container test-container Normal Started 7m1s (x6 over 5h7m) kubelet Started container test-container #nginx日志 223.74.152.108 - - [23/Aug/2025:16:41:57 +0000] "HEAD /v2/library/alpine/manifests/3.15?ns=docker.io HTTP/1.1" 401 0 "-" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:42:00 +0000] "HEAD /v2/library/alpine/manifests/3.15?ns=docker.io HTTP/1.1" 200 0 "-" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:42:01 +0000] "GET /v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io HTTP/1.1" 401 157 "-" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:42:04 +0000] "GET /v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io HTTP/1.1" 307 0 "-" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:42:04 +0000] "GET 
/_proxy/docker-images-prod.6aa30f8b08e16409b46e0173d6de2f56.r2.cloudflarestorage.com/registry-v2/docker/registry/v2/blobs/sha256/32/32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=f1baa2dd9b876aeb89efebbfc9e5d5f4%2F20250823%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250823T164203Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=860ec74942b8c48e9922b561b9ef4cfd409dc4acf22daa9a31a45754aff6d32a HTTP/1.1" 206 1472 "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:32b91e3161c8fc2e3baf2732a594305ca5093c82ff4e0c9f6ebbd2a879468e1d?ns=docker.io" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:42:05 +0000] "GET /v2/library/alpine/blobs/sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108?ns=docker.io HTTP/1.1" 307 0 "-" "containerd/2.1.1+unknown" 223.74.152.108 - - [23/Aug/2025:16:43:21 +0000] "GET /_proxy/docker-images-prod.6aa30f8b08e16409b46e0173d6de2f56.r2.cloudflarestorage.com/registry-v2/docker/registry/v2/blobs/sha256/d0/d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108/data?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=f1baa2dd9b876aeb89efebbfc9e5d5f4%2F20250823%2Fauto%2Fs3%2Faws4_request&X-Amz-Date=20250823T164205Z&X-Amz-Expires=1200&X-Amz-SignedHeaders=host&X-Amz-Signature=118da1e073a4589f6a14cb751acfbfdb0c7431fa55703f24d5278e7ec26246a3 HTTP/1.1" 206 2826431 "https://xing.axzys.cn/v2/library/alpine/blobs/sha256:d078792c4f9122259f14b539315bd92cbd9490ed73e08255a08689122b143108?ns=docker.io" "containerd/2.1.1+unknown" 四、剖析过程和nginx配置 4.1参与者与目标- Client:nerdctl/containerd - Mirror:NGINX@ xing.axzys.cn(你这份配置) - Docker Hub:registry-1.docker.io(镜像 API) + auth.docker.io(发 token) - 对象存储/CDN(Docker Hub 背后):Cloudflare R2 等(这次命中 *.r2.cloudflarestorage.com) 目标:客户端的所有请求都打到你域名,由 NGINX 统一处理认证、改写 3xx、缓存 /v2/ 下可缓存内容;当上游把大文件重定向到对象存储时,继续保持同域(走你域名的 /_proxy/...),避免直连外网受限/不可达。4.2按时间线还原整条链路时间均来自你贴的两段日志(nginx access log 与 nerdctl --debug),相互印证。4.2.1准备(为什么需要 resolver/SNI 等)你在 http 块里: resolver 1.1.1.1 8.8.8.8 ...; 因为后面大量用到了变量形式的 upstream 主机名($upstream_host),Nginx 需要在运行时解 DNS。 在 /v2/ 和 /_proxy/ 中你都开启了: proxy_ssl_server_name on; proxy_ssl_name $upstream_host; 这样跟上游 TLS 握手时,SNI 会填真实目标域名,证书校验才会通过。4.2.2HEAD manifest 触发认证(16:41:57 → 401)HEAD /v2/library/alpine/manifests/3.15?ns=docker.io → 401 WWW-Authenticate: Bearer realm="https://xing.axzys.cn/token", service="registry.docker.io"谁返回 401? 你的 NGINX(并非 Docker Hub)。为什么? 你在 /v2/: proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always; 这会强制把认证引导到你自己的 /token 端点,从而把“领 token”的流量固定在你的域名下(便于出网与审计)。 client 行为:containerd 收到 401 + WWW-Authenticate 后,会去 GET https://xing.axzys.cn/token?... 领一个 Bearer Token(JWT)。 你的 access log 片段里没贴出 /token 的那行,但从后续现象可知它已成功。nerdctl debug 里出现 Unauthorized ... no scope specified for token auth challenge 这只是 containerd 发起 token 流程的常见提示——第一次 401 只提供了 realm/service,后续在请求具体资源时它会带上 scope=repository:library/alpine:pull 等参数去换真正可用的 token。4.2.3 拉取 config blob(16:42:01 → 401 → 307)GET /v2/.../blobs/sha256:32b91e... 
→ 401 ...随后同 URL → 307 Location: https://...r2.cloudflarestorage.com/...- 第一次 401:常见于 token 刷新或 scope 切换;containerd 会透明地再换一次 token(或掉头再次请求),随即你就看到 307 了。 - 307 from Hub/CDN:Docker Hub 对于实际二进制层(包括 config 层)不会直接回源给你,而是下发一个预签名 URL(Cloudflare R2/S3/GCS 等)。你的 /v2/ 配置有: proxy_redirect ~^https://(?<h>[^/]+)(?<p>/.*)$ https://$server_name/_proxy/$h$p; #这会把上游 30x Location 改写成你域名下的 /_proxy/<原host>/<原path?query>,于是客户端继续请求你域名,而不会直连 R2。4.2.4 通过 /_proxy 去对象存储(16:42:04 → 206)GET /_proxy/docker-images-prod....r2.cloudflarestorage.com/... → 206 Partial Content Content-Range: bytes 0-1471/1472 X-Cache-Status: BYPASS命中你 location ~ ^/_proxy/... 域名白名单严格校验,非允许列表一律 403(你已经列了 R2/S3/GCS/Quay CDN/Azure/Microsoft/Elastic/Cloudfront/Fastly 等)。 SNI/证书校验对齐上游真实主机(proxy_ssl_name $upstream_host; proxy_ssl_verify on;)。 不缓存(proxy_cache off;),不缓冲(proxy_buffering off;),不透传 Authorization(安全起见,proxy_set_header Authorization "";)。 仅透传 Range:proxy_set_header Range $http_range; —— 客户端最常发 Range: bytes=0-,于是上游返回 206 Partial Content。这次的对象是 config(1472 字节),一口气就拿完了(Content-Range: 0-1471/1472)。 nerdctl debug 里还能看到: cf-ray=...-ICN —— 这是 Cloudflare 的 PoP 标识,ICN 通常表示仁川/首尔边缘节点,说明你离 R2 的边缘很近,但速率还是取决于上游限速与跨网络链路。4.2.5 拉取大层(layer blob)(16:42:05 → 307;16:43:21 → 206)GET /v2/.../blobs/sha256:d07879... → 307 Location: https://...r2.cloudflarestorage.com/... GET /_proxy/...r2.cloudflarestorage.com/... → 206 2,826,431 bytes过程与 Step 3/4 相同,只是这个 blob 是真正的大层。 你的 access log 里 206 2826431,等于 ~2.70 MiB;整个拉取最终统计 total 2.7 MiB,耗时 86.3s(~32 KiB/s),这正是你 debug 里最后那行4.3为什么这些 NGINX 指令至关重要4.3.1认证引导(把令牌流程“拉到你域名”)/v2/ 里:proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always; /token 里:proxy_pass https://auth.docker.io/token$is_args$args; proxy_set_header Host auth.docker.io; ...; proxy_cache off; proxy_buffering off; #这确保客户端永远找你要 token,你再转发给 auth.docker.io。这样即便直连外网不稳定,token 也能拿到。4.3.2重定向改写到同域的 /_proxy/v2/ 里:proxy_redirect 用正则把任何 https://<host>/<path> 改写为 https://xing.axzys.cn/_proxy/<host>/<path>。 #客户端永远与 你域名交互(包括下载层),不会直连 R2/S3/GCS —— 这就是加速器/出口统一的关键。4.3.3 /_proxy 的安全与直通策略允许名单:仅允许对象存储/官方域名;其他域一律 403(防 SSRF、钓鱼域)。 TLS/SNI 严格:与上游域名完全一致的 SNI 与证书验证。 禁缓存/禁缓冲/清理凭据:预签名 URL 是带时效与权限的,不能缓存;也不要带上任何敏感头。 只透传 Range:让上游直接按 Range 返回 206,最大化兼容客户端的断点续传与并行策略。4.3.4 缓存与切片(仅对 /v2/)slice 1m; proxy_cache docker_cache; proxy_cache_key ... 
$slice_range; proxy_cache_valid 200 206 302 10m; proxy_cache_use_stale error timeout updating http_5xx; proxy_cache_lock on; 这套优化对直接从 /v2/ 回 200/206 的上游特别有效(很多私有 registry 会这么回)。 但对 Docker Hub,由于大层都会 30x 到对象存储,真正的数据并不在 /v2/,而是在 /_proxy(你已禁缓存)。因此: /v2/ 的切片缓存主要惠及:manifest(200)和上游可能返回的 302/小对象; 大层数据不会进 cache(因为 /_proxy 禁缓存且 URL 是带过期时间的预签名)。这是刻意的正确选择,避免缓存过期签名导致 403。4.4 你日志里出现的关键头部含义Docker-RateLimit-Source: 15.164.211.114:Hub 把镜像拉取计数记到你服务器的出口 IP;所有内网客户端都会共享这个匿名配额。 RateLimit-Limit: 100;w=21600 / RateLimit-Remaining: 92;w=21600:匿名用户的典型限额(6 小时窗口 100 次)。 Content-Range: bytes 0-1471/1472、206 Partial Content:按 Range 断点下载,containerd/nerdctl 默认就会这么拉。 cf-ray=...-ICN:Cloudflare PoP(ICN=Incheon/Seoul),表明对象实际由边缘节点服务。 X-Cache-Status: BYPASS:这条来自 /_proxy,因为你明确 proxy_cache off,所以必然是 BYPASS(正常)。4.5 为什么整体耗时 86.3s(32 KiB/s)上游对匿名大层的限速(CDN 端的 Per-connection/Per-IP throttle)+ 公网出口质量 通常是最主要因素; 你的 /_proxy 正确地关闭了缓存,所以不会被“预签名 URL 过期/权限偏差”坑到,但也意味着无法靠本地缓存提升首拉速度; 第二次拉取(相同层/相同对象)也不会从 /_proxy 命中,因为 URL 带签名且时效变化;不过 manifest 与 302 在 /v2/ 有 10 分钟缓存,能节省“引导步骤”的往返。4.6超简时间轴(把两段日志合在一起看)16:41:57 HEAD manifests → 401(你的 /v2/ 故意引导去 /token) 16:42:00 HEAD manifests(带 token)→ 200(拿到 manifest list digest) 16:42:01 GET config blob → 401(token/scope 校验) 16:42:04 GET 同 config blob → 307(Hub 把数据放到 R2) 16:42:04 GET /_proxy 到 R2 → 206(1472B,config 完成) 16:42:05 GET layer blob → 307(重定向到 R2) 16:43:21 GET /_proxy 到 R2 → 206(2.7MiB,大层完成) 总计:2.7MiB / 86.3s ≈ 32KiB/s(主要瓶颈在上游/公网链路/限速)# /etc/nginx/sites-available/docker-mirror # DNS for variable proxy_pass resolver 1.1.1.1 8.8.8.8 valid=300s ipv6=off; # Cache (only used under /v2/) proxy_cache_path /var/cache/nginx/docker levels=1:2 keys_zone=docker_cache:50m max_size=300g inactive=7d use_temp_path=off; # Registry v2 header map $http_docker_distribution_api_version $docker_api_version { default "registry/2.0"; } # expose cache status map $upstream_cache_status $cache_status { default $upstream_cache_status; "" "BYPASS"; } server { listen 443 ssl http2; server_name xing.axzys.cn; ssl_certificate /etc/nginx/ssl/xing.axzys.cn.pem; ssl_certificate_key /etc/nginx/ssl/xing.axzys.cn.key; client_max_body_size 0; proxy_http_version 1.1; proxy_connect_timeout 60s; proxy_read_timeout 600s; proxy_send_timeout 600s; # 默认流式 proxy_buffering off; proxy_request_buffering off; proxy_set_header Connection ""; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header Docker-Distribution-Api-Version $docker_api_version; # 全局打开缓存(/_proxy、/token 会单独关闭) proxy_cache docker_cache; proxy_cache_lock on; proxy_cache_revalidate on; proxy_cache_min_uses 1; proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; proxy_cache_valid 200 206 302 10m; add_header X-Cache-Status $cache_status always; # 把上游 3xx Location 改写到 /_proxy/<host>/<path?query> proxy_redirect ~^https://(?<h>[^/]+)(?<p>/.*)$ https://$server_name/_proxy/$h$p; # ---------- token endpoint(Docker Hub 专用) ---------- location = /token { proxy_pass https://auth.docker.io/token$is_args$args; proxy_set_header Host auth.docker.io; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Authorization ""; proxy_cache off; proxy_buffering off; proxy_http_version 1.1; proxy_connect_timeout 30s; proxy_read_timeout 30s; proxy_send_timeout 30s; } # ---------- GHCR token 代领 ---------- location = /ghcr-token { proxy_pass https://ghcr.io/token$is_args$args; proxy_set_header Host ghcr.io; 
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Authorization ""; proxy_cache off; proxy_buffering off; proxy_http_version 1.1; proxy_connect_timeout 30s; proxy_read_timeout 30s; proxy_send_timeout 30s; } # ---------- /v2/ -> 本机 crproxy (Docker Hub) ---------- # 关键修正:把 /v2/... 重写为 **/v2/docker.io/...**(原来少了 /v2,导致 301 -> /_proxy/hub.docker.com -> 403) location ^~ /v2/ { set $upstream_host "127.0.0.1:6440"; proxy_set_header Host $upstream_host; # ✅ 正确:保持 /v2 前缀 rewrite ^/v2(?<rest>/.*)$ /v2/docker.io$rest break; # 分片 + 缓存键 slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; # 引导客户端去我们的 /token proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/token",service="registry.docker.io"' always; proxy_buffering off; proxy_pass http://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # ================= 其余注册中心(带前缀)================= # ghcr.io location ^~ /ghcr/ { set $upstream_host "ghcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/ghcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_hide_header WWW-Authenticate; add_header WWW-Authenticate 'Bearer realm="https://xing.axzys.cn/ghcr-token",service="ghcr.io"' always; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # gcr.io location ^~ /gcr/ { set $upstream_host "gcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/gcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # registry.k8s.io location ^~ /rk8s/ { set $upstream_host "registry.k8s.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/rk8s(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # 兼容 k8s.gcr.io -> registry.k8s.io location ^~ /kgcr/ { set $upstream_host "registry.k8s.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/kgcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # mcr.microsoft.com location ^~ /mcr/ { set $upstream_host "mcr.microsoft.com"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/mcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass 
https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # nvcr.io location ^~ /nvcr/ { set $upstream_host "nvcr.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/nvcr(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # quay.io location ^~ /quay/ { set $upstream_host "quay.io"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/quay(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # docker.elastic.co location ^~ /elastic/ { set $upstream_host "docker.elastic.co"; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; rewrite ^/elastic(?<rest>/.*)$ $rest break; slice 1m; proxy_cache_key $scheme$upstream_host$request_uri$is_args$args$slice_range; proxy_set_header Range $slice_range; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } # ---------- /_proxy/<host>/<path?query> -> 对象存储/CDN ---------- location ~ ^/_proxy/(?<h>[^/]+)(?<p>/.*)$ { if ($h !~* ^(registry-1\.docker\.io|auth\.docker\.io|production\.cloudflare\.docker\.com|.*\.cloudflarestorage\.com|.*\.r2\.cloudflarestorage\.com|.*\.amazonaws\.com|storage\.googleapis\.com|.*\.googleapis\.com|.*\.pkg\.dev|ghcr\.io|github\.com|pkg-containers\.[^/]*githubusercontent\.com|objects\.githubusercontent\.com|.*\.blob\.core\.windows\.net|.*\.azureedge\.net|mcr\.microsoft\.com|.*\.microsoft\.com|quay\.io|cdn\.quay\.io|.*quay-cdn[^/]*\.redhat\.com|k8s\.gcr\.io|registry\.k8s\.io|gcr\.io|docker\.elastic\.co|.*\.elastic\.co|.*\.cloudfront\.net|.*\.fastly\.net)$) { return 403; } set $upstream_host $h; rewrite ^/_proxy/[^/]+(?<rest>/.*)$ $rest break; proxy_set_header Host $upstream_host; proxy_ssl_server_name on; proxy_ssl_name $upstream_host; proxy_ssl_protocols TLSv1.2 TLSv1.3; proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; proxy_ssl_verify on; proxy_ssl_verify_depth 2; proxy_set_header Range $http_range; proxy_redirect off; proxy_cache off; proxy_buffering off; proxy_request_buffering off; proxy_set_header Authorization ""; proxy_pass https://$upstream_host; access_log /var/log/nginx/docker_mirror_access.log; error_log /var/log/nginx/docker_mirror_error.log warn; } location = /healthz { return 200 'ok'; add_header Content-Type text/plain; } } # HTTP -> HTTPS server { listen 80; server_name xing.axzys.cn; return 301 https://$host$request_uri; }
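To sanity-check the mirror end to end without pulling a full image, the token/manifest handshake described in section 4.2 can be replayed by hand. This is a minimal sketch, assuming curl and jq are installed on the client node and that xing.axzys.cn resolves from it; library/alpine is only an example repository.

# 1) Anonymous HEAD on a manifest: expect 401 plus a WWW-Authenticate header
#    pointing at https://xing.axzys.cn/token (rewritten by the /v2/ location).
curl -sI "https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" | grep -iE 'HTTP/|www-authenticate'

# 2) Obtain an anonymous pull token for library/alpine through the mirror's /token endpoint.
TOKEN=$(curl -s "https://xing.axzys.cn/token?service=registry.docker.io&scope=repository:library/alpine:pull" | jq -r .token)

# 3) Repeat the manifest request with the token: expect 200 and a Docker-Content-Digest
#    header, the same flow containerd follows in the debug log above.
curl -sI -H "Authorization: Bearer $TOKEN" \
     -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" \
     "https://xing.axzys.cn/v2/library/alpine/manifests/3.15?ns=docker.io" | grep -iE 'HTTP/|docker-content-digest'

If the manifest request succeeds but blob GETs fail, the problem is usually in the /_proxy allow-list or the proxy_redirect rewrite rather than in authentication.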
2025-08-21 · 5 reads · 0 comments · 0 likes
2025-08-19
Creating a Helm App
一、gitlab仓库配置 1.1克隆代码root@k8s-01:~/argocd# cd /opt/ root@k8s-01:/opt# ls cni containerd root@k8s-01:/opt# git clone http://192.168.30.181/develop/argo-demo.git Cloning into 'argo-demo'... Username for 'http://192.168.30.181': root Password for 'http://root@192.168.30.181': remote: Enumerating objects: 19, done. remote: Counting objects: 100% (19/19), done. remote: Compressing objects: 100% (16/16), done. remote: Total 19 (delta 3), reused 0 (delta 0), pack-reused 0 (from 0) Receiving objects: 100% (19/19), 4.49 KiB | 1.12 MiB/s, done. Resolving deltas: 100% (3/3), done. root@k8s-01:/opt# cd argo-demo/ root@k8s-01:/opt/argo-demo# ls manifests README.md root@k8s-01:/opt/argo-demo# 1.2创建Helm应用创建一个名为helm的approot@k8s-01:/opt/argo-demo# helm create helm Creating helm root@k8s-01:/opt/argo-demo# ls helm manifests README.md root@k8s-01:/opt/argo-demo# tree helm helm ├── charts ├── Chart.yaml ├── templates │ ├── deployment.yaml │ ├── _helpers.tpl │ ├── hpa.yaml │ ├── ingress.yaml │ ├── NOTES.txt │ ├── serviceaccount.yaml │ ├── service.yaml │ └── tests │ └── test-connection.yaml └── values.yaml 3 directories, 10 files 修改helm配置[root@tiaoban argo-demo]# cd helm/ [root@tiaoban helm]# vim Chart.yaml appVersion: "v1" # 修改默认镜像版本为v1 [root@tiaoban helm]# vim values.yaml image: repository: ikubernetes/myapp # 修改镜像仓库地址helm文件校验root@k8s-01:/opt/argo-demo# helm lint helm ==> Linting helm [INFO] Chart.yaml: icon is recommended 1 chart(s) linted, 0 chart(s) failed 1.3推送代码root@k8s-01:/opt/argo-demo# git add . root@k8s-01:/opt/argo-demo# git commit -m "add helm" Author identity unknown *** Please tell me who you are. Run git config --global user.email "you@example.com" git config --global user.name "Your Name" to set your account's default identity. Omit --global to set the identity only in this repository. fatal: unable to auto-detect email address (got 'root@k8s-01.(none)') root@k8s-01:/opt/argo-demo# root@k8s-01:/opt/argo-demo# root@k8s-01:/opt/argo-demo# root@k8s-01:/opt/argo-demo# git config --global user.email “790731@qq.com” git config --global user.name "axing" root@k8s-01:/opt/argo-demo# git commit -m "add helm" [main ea70765] add helm 11 files changed, 450 insertions(+) create mode 100644 helm/.helmignore create mode 100644 helm/Chart.yaml create mode 100644 helm/templates/NOTES.txt create mode 100644 helm/templates/_helpers.tpl create mode 100644 helm/templates/deployment.yaml create mode 100644 helm/templates/hpa.yaml create mode 100644 helm/templates/ingress.yaml create mode 100644 helm/templates/service.yaml create mode 100644 helm/templates/serviceaccount.yaml create mode 100644 helm/templates/tests/test-connection.yaml create mode 100644 helm/values.yaml root@k8s-01:/opt/argo-demo# root@k8s-01:/opt/argo-demo# git push Username for 'http://192.168.30.181': root Password for 'http://root@192.168.30.181': Enumerating objects: 17, done. Counting objects: 100% (17/17), done. Delta compression using up to 8 threads Compressing objects: 100% (15/15), done. Writing objects: 100% (16/16), 6.00 KiB | 6.00 MiB/s, done. 
Total 16 (delta 0), reused 0 (delta 0), pack-reused 0 To http://192.168.30.181/develop/argo-demo.git 293d75f..ea70765 main -> main root@k8s-01:/opt/argo-demo# 1.4查看验证二、Argo CD配置 2.1创建helm类型的app通过Argo UI创建app,填写如下信息:2.2查看验证查看argo cd应用信息,已完成部署。登录k8s查看资源[root@tiaoban helm]# kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES demo-helm-585b5ddb66-bdbcr 1/1 Running 0 2m38s 10.244.3.31 work3 <none> <none> rockylinux 1/1 Running 13 (140m ago) 13d 10.244.1.7 work1 <none> <none> [root@tiaoban helm]# kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE demo-helm ClusterIP 10.105.202.171 <none> 80/TCP 2m41s kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 279d [root@tiaoban helm]# kubectl exec -it rockylinux -- bash [root@rockylinux /]# curl demo-helm Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>版本更新测试#修改git仓库文件,模拟版本更新 root@k8s-01:/opt/argo-demo# cd helm/ root@k8s-01:/opt/argo-demo/helm# ls charts Chart.yaml templates values.yaml root@k8s-01:/opt/argo-demo/helm# vi Chart.yaml root@k8s-01:/opt/argo-demo/helm# ls charts Chart.yaml templates values.yaml root@k8s-01:/opt/argo-demo/helm# vi values.yaml root@k8s-01:/opt/argo-demo/helm# ls charts Chart.yaml templates values.yaml # 提交推送至git仓库 root@k8s-01:/opt/argo-demo/helm# git add . root@k8s-01:/opt/argo-demo/helm# git commit -m "update helm v2" [main 59dcb2d] update helm v2 2 files changed, 3 insertions(+), 3 deletions(-) root@k8s-01:/opt/argo-demo/helm# git push Username for 'http://192.168.30.181': root Password for 'http://root@192.168.30.181': Enumerating objects: 9, done. Counting objects: 100% (9/9), done. Delta compression using up to 8 threads Compressing objects: 100% (5/5), done. Writing objects: 100% (5/5), 475 bytes | 475.00 KiB/s, done. Total 5 (delta 3), reused 0 (delta 0), pack-reused 0 To http://192.168.30.181/develop/argo-demo.git ea70765..59dcb2d main -> main root@k8s-01:/opt/argo-demo/helm# 查看argo cd更新记录访问验证[root@tiaoban helm]# kubectl exec -it rockylinux -- bash [root@rockylinux /]# curl demo-helm Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
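For reference, the app created through the Argo CD web UI above can also be declared as an Application manifest and applied with kubectl. This is only a sketch: the application name demo-helm, the main branch, and the automated sync policy are assumptions inferred from the output above; adjust them to your environment.

cat <<'EOF' | kubectl apply -f -
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: demo-helm              # assumed name, matching the deployed resources above
  namespace: argocd
spec:
  project: default
  source:
    repoURL: http://192.168.30.181/develop/argo-demo.git
    targetRevision: main       # the branch pushed to above
    path: helm                 # chart directory created with `helm create helm`
    helm:
      valueFiles:
        - values.yaml
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated: {}              # optional; remove for manual sync
EOF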
2025-08-19 · 4 reads · 0 comments · 0 likes
2025-08-19
Directory App Creation and Configuration
一、APP创建 1.1webUI创建1.2CLI创建除了使用webUI创建应用外,也可以使用Argo CLI命令行工具创建# 创建应用 root@k8s-01:~/argocd# argocd app create demo1 \ --repo http://192.168.30.181/develop/argo-demo.git \ --path manifests/ --sync-policy automatic --dest-namespace default \ --dest-server https://kubernetes.default.svc --directory-recurse WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. application 'demo1' created root@k8s-01:~/argocd# # 查看应用列表 root@k8s-01:~/argocd# argocd app list WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET argocd/demo https://kubernetes.default.svc default OutOfSync Progressing Manual SharedResourceWarning(3) http://192.168.30.181/develop/argo-demo.git manifests HEAD argocd/demo-test https://kubernetes.default.svc default OutOfSync Healthy Manual SharedResourceWarning(3) http://192.168.30.181/develop/argo-demo.git manifests/ HEAD argocd/demo1 https://kubernetes.default.svc default default Synced Healthy Auto <none> http://192.168.30.181/develop/argo-demo.git manifests/ # 查看应用状态 root@k8s-01:~/argocd# kubectl get application -n argocd NAME SYNC STATUS HEALTH STATUS demo OutOfSync Progressing demo-test OutOfSync Healthy demo1 Synced Healthy # 执行立即同步操作 root@k8s-01:~/argocd# argocd app sync argocd/demo WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. TIMESTAMP GROUP KIND NAMESPACE NAME STATUS HEALTH HOOK MESSAGE 2025-08-19T07:00:05+00:00 Service default myapp OutOfSync Healthy 2025-08-19T07:00:05+00:00 apps Deployment default myapp OutOfSync Healthy 2025-08-19T07:00:05+00:00 traefik.io IngressRoute default myapp OutOfSync 2025-08-19T07:00:05+00:00 Service default myapp Synced Healthy 2025-08-19T07:00:05+00:00 Service default myapp Synced Healthy service/myapp configured 2025-08-19T07:00:05+00:00 apps Deployment default myapp OutOfSync Healthy deployment.apps/myapp configured 2025-08-19T07:00:05+00:00 traefik.io IngressRoute default myapp OutOfSync ingressroute.traefik.io/myapp configured 2025-08-19T07:00:05+00:00 apps Deployment default myapp Synced Healthy deployment.apps/myapp configured 2025-08-19T07:00:05+00:00 traefik.io IngressRoute default myapp Synced ingressroute.traefik.io/myapp configured Name: argocd/demo Project: default Server: https://kubernetes.default.svc Namespace: URL: https://argocd.local.com:30443/applications/argocd/demo Source: - Repo: http://192.168.30.181/develop/argo-demo.git Target: HEAD Path: manifests SyncWindow: Sync Allowed Sync Policy: Manual Sync Status: Synced to HEAD (293d75f) Health Status: Healthy Operation: Sync Sync Revision: 293d75f441403c3f19c888df50939ec3a9e6f1fa Phase: Succeeded Start: 2025-08-19 07:00:05 +0000 UTC Finished: 2025-08-19 07:00:05 +0000 UTC Duration: 0s Message: successfully synced (all tasks run) GROUP KIND NAMESPACE NAME STATUS HEALTH HOOK MESSAGE Service default myapp Synced Healthy service/myapp configured apps Deployment default myapp Synced Healthy deployment.apps/myapp configured traefik.io IngressRoute default myapp Synced ingressroute.traefik.io/myapp configured1.3yaml文件创建[root@tiaoban ~]# cat demo.yaml apiVersion: argoproj.io/v1alpha1 kind: Application metadata: name: demo namespace: argocd spec: destination: namespace: default server: 'https://kubernetes.default.svc' source: path: manifests # yaml资源清单路径 repoURL: 
'http://gitlab.local.com/devops/argo-demo.git' # 同步仓库地址 targetRevision: 'master' # 分支名称 sources: [] project: default syncPolicy: automated: prune: false selfHeal: false [root@tiaoban ~]# kubectl apply -f demo.yaml application.argoproj.io/demo created二、应用同步选项 2.1同步策略配置SYNC POLICY:同步策略 Argo CD能够在检测到 Git 中所需的清单与集群中的实时状态之间存在差异时自动同步应用程序。自动同步是GitOps Pull模式的核心,好处是 CI/CD Pipeline 不再需要直接访问Argo CD API服务器来执行部署,可以通过在WEB UI的Application-SYNC POLICY中启用AUTOMATED或CLIargocd app set <APPNAME> --sync-policy automated 进行配置。PRUNE RESOURCES :自动删除资源,开启选项后Git Repo中删除资源会自动在环境中删除对应的资源。SELF HEAL:自动痊愈,强制以GitRepo状态为准,手动在环境修改不会生效。2.2AutoSync自动同步默认同步周期是180s, 可以修改argocd-cm配置文件,添加timeout.reconciliation参数。同步流程: 1. 获取所有设置为auto-sync的apps 2. 从每个app的git存储库中获取最新状态 3. 将git状态与集群应用状态对比 4. 如果相同,不执行任何操作并标记为synced 5. 如果不同,标记为out-of-sync2.3SyncOptions同步选项- Validate=false:禁用Kubectl验证 - Replace=true:kubectl replace替换 - PrunePropagationPolicy=background:级联删除策略(background, foreground and orphan.)ApplyOutOfSyncOnly=true:仅同步不同步状态的资源。避免大量对象时资源API消耗 - CreateNamespace=true:创建namespace - PruneLast=true:同步后进行修剪 - RespectlgnoreDifferences=true:支持忽略差异配置(ignoreDifferences:) - ServerSideApply=true:部署操作在服务端运行(避免文件过大)三、应用状态 sync status - Synced:已同步 - OutOfSync:未同步 health status - Progressing:正在执行 - Suspended:资源挂载暂停 - Healthy:资源健康 - Degraded:资源故障 - Missing:集群不存在资源
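The sync policy and sync options from sections 2.1–2.3 can also be switched on from the CLI after the app exists. A short sketch for the demo1 app created in 1.2; the --sync-policy form is quoted from section 2.1, while --auto-prune, --self-heal and --sync-option are the usual companion flags in recent argocd CLI versions — verify with `argocd app set --help`.

# Enable auto-sync on the demo1 app, with prune and self-heal
argocd app set demo1 --sync-policy automated
argocd app set demo1 --auto-prune --self-heal

# Attach sync options from section 2.3, for example:
argocd app set demo1 --sync-option CreateNamespace=true
argocd app set demo1 --sync-option ApplyOutOfSyncOnly=true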
2025-08-19 · 4 reads · 0 comments · 0 likes
2025-08-19
ArgoCD Monitoring
Reference: https://argo-cd.readthedocs.io/en/stable/operator-manual/metrics/

1. Configure targets

1.1 Inspect the metrics endpoints

[root@tiaoban ~]# kubectl get svc -n argocd
NAME                                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
argocd-applicationset-controller          ClusterIP   10.97.81.94      <none>        7000/TCP,8080/TCP            27d
argocd-dex-server                         ClusterIP   10.106.72.83     <none>        5556/TCP,5557/TCP,5558/TCP   27d
argocd-metrics                            ClusterIP   10.103.26.87     <none>        8082/TCP                     27d
argocd-notifications-controller-metrics   ClusterIP   10.105.181.100   <none>        9001/TCP                     27d
argocd-redis                              ClusterIP   10.100.131.134   <none>        6379/TCP                     27d
argocd-repo-server                        ClusterIP   10.100.123.80    <none>        8081/TCP,8084/TCP            27d
argocd-server                             NodePort    10.106.11.146    <none>        80:30701/TCP,443:30483/TCP   27d
argocd-server-metrics                     ClusterIP   10.105.164.150   <none>        8083/TCP                     27d
[root@tiaoban ~]# kubectl exec -it rockylinux -- bash
[root@rockylinux /]# curl argocd-metrics.argocd.svc:8082/metrics
# HELP argocd_app_info Information about application.
# TYPE argocd_app_info gauge
argocd_app_info{autosync_enabled="true",dest_namespace="default",dest_server="https://kubernetes.default.svc",health_status="Healthy",name="blue-green",namespace="argocd",operation="",project="default",repo="http://gitlab.local.com/devops/argo-demo",sync_status="Synced"} 1
# HELP argocd_app_reconcile Application reconciliation performance.
# TYPE argocd_app_reconcile histogram
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="0.25"} 12
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="0.5"} 18
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="1"} 21
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="2"} 21
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="4"} 22
argocd_app_reconcile_bucket{dest_server="https://kubernetes.default.svc",namespace="argocd",le="8"} 24

1.2 Create the ServiceMonitor resource

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: argocd-exporter                  # ServiceMonitor name
  namespace: monitoring                  # namespace the ServiceMonitor lives in
spec:
  jobLabel: argocd-exporter              # job name
  endpoints:                             # scrape endpoints; an array, several entries allowed, each carrying interval, path and port
  - port: metrics                        # port Prometheus scrapes; this is the port *name*, matched on the Service selected via spec.selector
    interval: 30s                        # scrape interval, in seconds
    scheme: http                         # protocol
    path: /metrics                       # scrape path
  selector:                              # Service label selector
    matchLabels:
      app.kubernetes.io/name: argocd-metrics
  namespaceSelector:                     # namespace selection
    matchNames:
    - argocd

1.3 Verify the targets

2. View the data in Grafana

2.1 Import a dashboard

Reference: https://grafana.com/grafana/dashboards/14584-argocd/

2.2 View the data
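Once the targets are up, the same metrics can be queried ad hoc from Prometheus before building dashboards or alerts. A minimal sketch, assuming the kube-prometheus default Service prometheus-k8s.monitoring.svc:9090 and that curl/jq are available in the pod you run it from; adjust the Service name to your Prometheus install.

# List applications that are not currently Synced, using the argocd_app_info
# metric shown in 1.1 (the sync_status label comes straight from that metric).
curl -sG 'http://prometheus-k8s.monitoring.svc:9090/api/v1/query' \
  --data-urlencode 'query=argocd_app_info{sync_status!="Synced"}' | jq '.data.result'

# Same idea for unhealthy applications:
curl -sG 'http://prometheus-k8s.monitoring.svc:9090/api/v1/query' \
  --data-urlencode 'query=argocd_app_info{health_status!="Healthy"}' | jq '.data.result'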
2025-08-19 · 5 reads · 0 comments · 0 likes
2025-08-19
ArgoCD project
一、Project创建通过项目,可以配置对应用程序的访问控制策略。例如,可以指定哪些用户或团队有权在特定命名空间或集群中进行部署操作。提供了资源隔离的功能,确保不同项目之间的资源不会互相干扰。这有助于维护不同团队或应用程序之间的清晰界限。 最佳实践应该是为每个gitlab group在argoCD中创建对应的Project,便于各个组之间权限资源相互隔离。1.1webUI创建1.2CLI创建## argocd CLI # login argocd login argocd.idevops.site # list argocd proj list # remove argocd proj remove dev1 # create argocd proj create --help argocd proj create dev2 argocd proj list argocd proj add-source dev2 http://github.com/dev2/app.git1.3yaml创建示例文档: https://argo-cd.readthedocs.io/en/stable/operator-manual/project.yamlapiVersion: argoproj.io/v1alpha1 kind: AppProject metadata: name: dev3 namespace: argocd finalizers: - resources-finalizer.argocd.argoproj.io spec: description: Example Project sourceRepos: - 'https://github.com/dev3/app.git' destinations: - namespace: dev3 server: https://kubernetes.default.svc name: in-cluster # Deny all cluster-scoped resources from being created, except for Namespace clusterResourceWhitelist: - group: '' kind: Namespace # Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy namespaceResourceBlacklist: - group: '' kind: ResourceQuota - group: '' kind: LimitRange - group: '' kind: NetworkPolicy # Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet namespaceResourceWhitelist: - group: 'apps' kind: Deployment - group: 'apps' kind: StatefulSet二、project配置 2.1webUI配置2.2yaml配置apiVersion: argoproj.io/v1alpha1 kind: AppProject metadata: name: dev1 namespace: argocd spec: clusterResourceBlacklist: - group: "" kind: "" clusterResourceWhitelist: - group: "" kind: Namespace description: dev1 group destinations: - name: in-cluster namespace: dev1 server: https://kubernetes.default.svc namespaceResourceWhitelist: - group: '*' kind: '*' roles: - jwtTokens: - iat: 1684030305 id: 12764563-0582-4d2d-afbc-ab2712c5c47e name: dev1-role policies: - p, proj:dev1:dev1-role, applications, get, dev1/*, allow - p, proj:dev1:dev1-role, applications, sync, dev1/*, allow - p, proj:dev1:dev1-role, applications, delete, dev1/*, deny sourceRepos: - http://gitlab.local.com/devops/** ## 根据项目组配置,允许该组下的所有repo - ""三、ProjectRoleProjectRole 是一种用于定义在特定项目 (Project) 范围内的访问控制策略的资源。它允许你对项目中的资源进行细粒度的权限管理,指定哪些用户或服务账户可以执行哪些操作。ProjectRole 主要用于增强安全性和隔离性,确保只有被授权的用户或系统组件可以对项目内的应用程序和资源进行特定操作。3.1创建role我们在demo项目下创建名为dev的角色,配置权限为:允许get sync操作权限,不允许delete操作。3.2创建JWT Tokenroot@k8s-01:~/argocd# argocd proj role create-token demo-project dev-role WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. Create token succeeded for proj:demo-project:dev-role. ID: 9c150b55-848f-436c-88db-fe61e95874fc Issued At: 2025-08-19T06:31:59Z Expires At: Never Token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJhcmdvY2QiLCJzdWIiOiJwcm9qOmRlbW8tcHJvamVjdDpkZXYtcm9sZSIsIm5iZiI6MTc1NTU4NTExOSwiaWF0IjoxNzU1NTg1MTE5LCJqdGkiOiI5YzE1MGI1NS04NDhmLTQzNmMtODhkYi1mZTYxZTk1ODc0ZmMifQ.54fvz4OOOIo-wsK_hwclCmW0oSIJO1vz2Xgv4Axl08s3.3验证测试# 注销之前登录的admin账号 [root@tiaoban ~]# argocd logout argocd.local.com Logged out from 'argocd.local.com' # 使用token查看app列表 [root@tiaoban ~]# argocd app list --auth-token eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJhcmdvY2QiLCJzdWIiOiJwcm9qOmRlbW8tcHJvamVjdDpkZXYtcm9sZSIsIm5iZiI6MTcxOTExNTk0OSwiaWF0IjoxNzE5MTE1OTQ5LCJqdGkiOiI5MDg5OTc0OC1mYjg2LTRlZjktYjNmMC03MWY4MjBjZjEwZDYifQ.RCLx7U-2RdQ_BD5z8sBW3Ghh5RA6DnwU9VHvmU8EgQM WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. 
NAME CLUSTER NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY CONDITIONS REPO PATH TARGET argocd/demo https://kubernetes.default.svc demo-project Synced Healthy Auto <none> http://gitlab.local.com/devops/argo-demo.git manifests HEAD # 使用token执行sync操作 [root@tiaoban ~]# argocd app sync argocd/demo --auth-token eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJhcmdvY2QiLCJzdWIiOiJwcm9qOmRlbW8tcHJvamVjdDpkZXYtcm9sZSIsIm5iZiI6MTcxOTExNTk0OSwiaWF0IjoxNzE5MTE1OTQ5LCJqdGkiOiI5MDg5OTc0OC1mYjg2LTRlZjktYjNmMC03MWY4MjBjZjEwZDYifQ.RCLx7U-2RdQ_BD5z8sBW3Ghh5RA6DnwU9VHvmU8EgQM WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. TIMESTAMP GROUP KIND NAMESPACE NAME STATUS HEALTH HOOK MESSAGE 2024-06-23T12:20:07+08:00 Service default myapp Synced Healthy 2024-06-23T12:20:07+08:00 apps Deployment default myapp Synced Healthy 2024-06-23T12:20:07+08:00 traefik.containo.us IngressRoute default myapp Synced 2024-06-23T12:20:07+08:00 traefik.containo.us IngressRoute default myapp Synced ingressroute.traefik.containo.us/myapp unchanged 2024-06-23T12:20:07+08:00 Service default myapp Synced Healthy service/myapp unchanged 2024-06-23T12:20:07+08:00 apps Deployment default myapp Synced Healthy deployment.apps/myapp unchanged Name: argocd/demo Project: demo-project Server: https://kubernetes.default.svc Namespace: URL: https://argocd.local.com/applications/argocd/demo Source: - Repo: http://gitlab.local.com/devops/argo-demo.git Target: HEAD Path: manifests SyncWindow: Sync Allowed Sync Policy: Automated Sync Status: Synced to HEAD (0ea8019) Health Status: Healthy Operation: Sync Sync Revision: 0ea801988a54f0ad73808454f2fce5030d3e28ef Phase: Succeeded Start: 2024-06-23 12:20:07 +0800 CST Finished: 2024-06-23 12:20:07 +0800 CST Duration: 0s Message: successfully synced (all tasks run) GROUP KIND NAMESPACE NAME STATUS HEALTH HOOK MESSAGE Service default myapp Synced Healthy service/myapp unchanged apps Deployment default myapp Synced Healthy deployment.apps/myapp unchanged traefik.containo.us IngressRoute default myapp Synced ingressroute.traefik.containo.us/myapp unchanged # 使用token删除应用,提示权限拒绝 [root@tiaoban ~]# argocd app delete argocd/demo --auth-token eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJhcmdvY2QiLCJzdWIiOiJwcm9qOmRlbW8tcHJvamVjdDpkZXYtcm9sZSIsIm5iZiI6MTcxOTExNTk0OSwiaWF0IjoxNzE5MTE1OTQ5LCJqdGkiOiI5MDg5OTc0OC1mYjg2LTRlZjktYjNmMC03MWY4MjBjZjEwZDYifQ.RCLx7U-2RdQ_BD5z8sBW3Ghh5RA6DnwU9VHvmU8EgQM WARN[0000] Failed to invoke grpc call. Use flag --grpc-web in grpc calls. To avoid this warning message, use flag --grpc-web. Are you sure you want to delete 'argocd/demo' and all its resources? [y/n] y FATA[0001] rpc error: code = PermissionDenied desc = permission denied: applications, delete, demo-project/demo, sub: proj:demo-project:dev-role, iat: 2024-06-23T04:12:29Z
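The role built through the web UI in 3.1 can also be reproduced from the CLI, which is handier for scripting. A sketch assuming the same project and role names as above; verify the exact flags with `argocd proj role add-policy --help` for your CLI version.

# Create the role inside the demo-project project
argocd proj role create demo-project dev-role

# Allow get and sync on every application in the project, deny delete
# (mirrors the policy lines shown in the dev1 AppProject YAML in 2.2).
argocd proj role add-policy demo-project dev-role --action get    --permission allow --object '*'
argocd proj role add-policy demo-project dev-role --action sync   --permission allow --object '*'
argocd proj role add-policy demo-project dev-role --action delete --permission deny  --object '*'

# Inspect the result, then mint a JWT for it as in 3.2
argocd proj role get demo-project dev-role
argocd proj role create-token demo-project dev-role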
2025-08-19 · 5 reads · 0 comments · 0 likes