Unverified commit e119863e authored by ChengHao Yang, committed by GitHub

Fix `debian11-custom-cni` failing test & upgrade `debian12-custom-cni-helm` chart version (#11654)


* Test: update custom_cni values

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>

* Test: fix cilium require kube_owner set to root

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>

* Test: update custom_cni render manifests

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>

* Test: fix render template pre-commit

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>

* Test: update debian12-custom-cni-helm chart version to 1.16.3

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>

---------

Signed-off-by: ChengHao Yang <17496418+tico88612@users.noreply.github.com>
parent 99c620d5
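For context, the manifest-based test scenario this commit fixes can be summarized as follows; this is a minimal sketch assembled from the diff below, not a complete scenario file:

# debian11-custom-cni scenario (sketch, assembled from the diff below)
cloud_image: debian-11
mode: default

# Kubespray settings
kube_owner: root                  # Cilium requires kube_owner set to root
kube_network_plugin: custom_cni
custom_cni_manifests:
  - "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"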
@@ -6,6 +6,13 @@ metadata:
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-envoy"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
@@ -36,9 +43,6 @@ data:
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
skip-cnp-status-startup-clean: "false"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
# If you want to run cilium in debug mode change this value to true
debug: "false"
@@ -47,6 +51,13 @@ data:
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
policy-cidr-match-mode: ""
# If you want metrics enabled in cilium-operator, set the port for
# which the Cilium Operator will have their metrics exposed.
# NOTE that this will open the port on the nodes where Cilium operator pod
# is scheduled.
operator-prometheus-serve-addr: ":9963"
enable-metrics: "true"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
@@ -58,7 +69,7 @@ data:
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "true"
enable-bpf-clock-probe: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
@@ -86,6 +97,10 @@ data:
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
@@ -103,10 +118,6 @@ data:
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all conneted clusters and
@@ -118,63 +129,444 @@ data:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "vxlan"
# Default case
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
service-no-backend-response: "reject"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-tcx: "true"
datapath-mode: "veth"
enable-masquerade-to-route-source: "false"
enable-xt-socket-fallback: "true"
install-iptables-rules: "true"
install-no-conntrack-iptables-rules: "false"
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
enable-local-redirect-policy: "false"
enable-runtime-device-detection: "true"
kube-proxy-replacement: "disabled"
kube-proxy-replacement: "false"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
bpf-lb-sock-terminate-pod-connections: "false"
enable-host-port: "false"
enable-external-ips: "false"
enable-node-port: "false"
nodeport-addresses: ""
enable-health-check-nodeport: "true"
enable-health-check-loadbalancer-ip: "false"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
bpf-lb-acceleration: "disabled"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
cni-exclusive: "true"
cni-log-file: "/var/run/cilium/cilium-cni.log"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
enable-node-selector-labels: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
ipam: "cluster-pool"
ipam-cilium-node-update-rate: "15s"
cluster-pool-ipv4-cidr: "{{ kube_pods_subnet }}"
cluster-pool-ipv4-mask-size: "24"
disable-cnp-status-updates: "true"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-bgp-control-plane: "false"
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
enable-k8s-terminating-endpoint: "true"
enable-sctp: "false"
k8s-client-qps: "10"
k8s-client-burst: "20"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-xff-num-trusted-hops-ingress: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-connect-timeout: "2"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
proxy-idle-timeout-seconds: "60"
external-envoy-proxy: "true"
envoy-base-id: "0"
envoy-keep-cap-netbindservice: "false"
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.
---
# Source: cilium/templates/cilium-envoy/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-envoy-config
namespace: kube-system
data:
bootstrap-config.json: |
{
"node": {
"id": "host~127.0.0.1~no-id~localdomain",
"cluster": "ingress-cluster"
},
"staticResources": {
"listeners": [
{
"name": "envoy-prometheus-metrics-listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9964
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy-prometheus-metrics-listener",
"route_config": {
"virtual_hosts": [
{
"name": "prometheus_metrics_route",
"domains": [
"*"
],
"routes": [
{
"name": "prometheus_metrics_route",
"match": {
"prefix": "/metrics"
},
"route": {
"cluster": "/envoy-admin",
"prefix_rewrite": "/stats/prometheus"
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
],
"stream_idle_timeout": "0s"
}
}
]
}
]
},
{
"name": "envoy-health-listener",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9878
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy-health-listener",
"route_config": {
"virtual_hosts": [
{
"name": "health",
"domains": [
"*"
],
"routes": [
{
"name": "health",
"match": {
"prefix": "/healthz"
},
"route": {
"cluster": "/envoy-admin",
"prefix_rewrite": "/ready"
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
],
"stream_idle_timeout": "0s"
}
}
]
}
]
}
],
"clusters": [
{
"name": "ingress-cluster",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s"
},
{
"name": "egress-cluster-tls",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"upstreamHttpProtocolOptions": {},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s",
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "egress-cluster",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s"
},
{
"name": "ingress-cluster-tls",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"upstreamHttpProtocolOptions": {},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s",
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "xds-grpc-cilium",
"type": "STATIC",
"connectTimeout": "2s",
"loadAssignment": {
"clusterName": "xds-grpc-cilium",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/xds.sock"
}
}
}
}
]
}
]
},
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"explicitHttpConfig": {
"http2ProtocolOptions": {}
}
}
}
},
{
"name": "/envoy-admin",
"type": "STATIC",
"connectTimeout": "2s",
"loadAssignment": {
"clusterName": "/envoy-admin",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
]
}
]
}
}
]
},
"dynamicResources": {
"ldsConfig": {
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
},
"cdsConfig": {
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
}
},
"bootstrapExtensions": [
{
"name": "envoy.bootstrap.internal_listener",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"
}
}
],
"layeredRuntime": {
"layers": [
{
"name": "static_layer_0",
"staticLayer": {
"overload": {
"global_downstream_max_connections": 50000
}
}
}
]
},
"admin": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -228,6 +620,9 @@ rules:
resources:
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumbgpnodeconfigs
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
@@ -239,6 +634,9 @@ rules:
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
@@ -275,10 +673,10 @@ rules:
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
- ciliumbgpnodeconfigs/status
verbs:
- patch
---
@@ -301,6 +699,15 @@ rules:
# to automatically delete [core|kube]dns pods so that are starting to being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- cilium-config
verbs:
# allow patching of the configmap to set annotations
- patch
- apiGroups:
- ""
resources:
@@ -416,6 +823,9 @@ rules:
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
verbs:
- create
- update
@@ -442,6 +852,11 @@ rules:
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
@@ -454,14 +869,27 @@ rules:
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
@@ -550,6 +978,31 @@ subjects:
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-envoy
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9964"
labels:
k8s-app: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
io.cilium/app: proxy
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium-envoy
ports:
- name: envoy-metrics
port: 9964
protocol: TCP
targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
@@ -571,21 +1024,17 @@ spec:
template:
metadata:
annotations:
# Set app AppArmor's profile to "unconfined". The value of this annotation
# can be modified as long users know which profiles they have available
# in AppArmor.
container.apparmor.security.beta.kubernetes.io/cilium-agent: "unconfined"
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: "unconfined"
container.apparmor.security.beta.kubernetes.io/mount-cgroup: "unconfined"
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: "unconfined"
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -603,6 +1052,7 @@ spec:
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
@@ -642,26 +1092,38 @@ spec:
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CNI_CHAINING_MODE
valueFrom:
configMapKeyRef:
name: cilium-config
key: cni-chaining-mode
optional: true
- name: CILIUM_CUSTOM_CNI_CONF
valueFrom:
configMapKeyRef:
name: cilium-config
key: custom-cni-conf
optional: true
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
lifecycle:
postStart:
exec:
command:
- "/cni-install.sh"
- "--enable-debug=false"
- "--cni-exclusive=true"
- "--log-file=/var/run/cilium/cilium-cni.log"
- "bash"
- "-c"
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
@@ -688,6 +1150,9 @@ spec:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
# Unprivileged containers need to mount /proc/sys/net from the host
# to have write access
- mountPath: /host/proc/sys/net
@@ -705,8 +1170,6 @@ spec:
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
- name: cni-path
mountPath: /host/opt/cni/bin
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
@@ -722,10 +1185,10 @@ spec:
mountPath: /tmp
initContainers:
- name: config
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
command:
- cilium
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
@@ -745,7 +1208,7 @@ spec:
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
# We use nsenter command with host's cgroup and mount namespaces enabled.
- name: mount-cgroup
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
env:
- name: CGROUP_ROOT
@@ -782,7 +1245,7 @@ spec:
drop:
- ALL
- name: apply-sysctl-overwrites
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
@@ -820,7 +1283,7 @@ spec:
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
@@ -836,7 +1299,7 @@ spec:
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.13.0@sha256:6544a3441b086a2e09005d3e21d1a4afb216fae19c5a60b35793c8a9438f8f68"
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -853,6 +1316,12 @@ spec:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
name: cilium-config
key: write-cni-conf-when-ready
optional: true
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
@@ -874,15 +1343,32 @@ spec:
mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "quay.io/cilium/cilium:v1.16.3@sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
resources:
requests:
cpu: 100m
memory: 100Mi # wait-for-kube-proxy
memory: 10Mi
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin # .Values.cni.install
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: "cilium"
serviceAccountName: "cilium"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
@@ -910,7 +1396,7 @@ spec:
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
# To mount cgroup2 filesystem on the host
# To mount cgroup2 filesystem on the host or apply sysctlfix
- name: hostproc
hostPath:
path: /proc
@@ -939,13 +1425,48 @@ spec:
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Sharing socket with Cilium Envoy on the same node by using a host path
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
projected:
secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: cilium-clustermesh
optional: true
# note: items are not explicitly listed here, since the entries of this secret
# depend on the peers configured, and that would cause a restart of all agents
# at every addition/removal. Leaving the field empty makes each secret entry
# to be automatically projected into the volume as a file whose name is the key.
- secret:
name: clustermesh-apiserver-remote-cert
optional: true
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
# note: we configure the volume for the kvstoremesh-specific certificate
# regardless of whether KVStoreMesh is enabled or not, so that it can be
# automatically mounted in case KVStoreMesh gets subsequently enabled,
# without requiring an agent restart.
- secret:
name: clustermesh-apiserver-local-cert
optional: true
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
@@ -955,6 +1476,174 @@ spec:
path: /proc/sys/kernel
type: Directory
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: cilium-envoy
name: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-envoy
image: "quay.io/cilium/cilium-envoy:v1.29.9-1728346947-0d05e48bfbb8c4737ec40d5781d970a550ed2bbd@sha256:42614a44e508f70d03a04470df5f61e3cffd22462471a0be0544cf116f2c50ba"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
args:
- '--'
- '-c /var/run/cilium/envoy/bootstrap-config.json'
- '--base-id 0'
- '--log-level info'
- '--log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v'
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
- name: envoy-artifacts
mountPath: /var/run/cilium/envoy/artifacts
readOnly: true
- name: envoy-config
mountPath: /var/run/cilium/envoy/
readOnly: true
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium-envoy"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
- name: envoy-artifacts
hostPath:
path: "/var/run/cilium/envoy/artifacts"
type: DirectoryOrCreate
- name: envoy-config
configMap:
name: cilium-envoy-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: bootstrap-config.json
path: bootstrap-config.json
# To keep state between restarts / upgrades
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
@@ -974,14 +1663,20 @@ spec:
matchLabels:
io.cilium/app: operator
name: cilium-operator
# ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case
# of one replica and no user configured Recreate strategy.
# otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
# podAntiAffinity which prevents deployments of multiple operator replicas on the same node.
strategy:
rollingUpdate:
maxSurge: 1
maxSurge: 25%
maxUnavailable: 1
maxUnavailable: 50%
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
labels:
io.cilium/app: operator
name: cilium-operator
@@ -990,7 +1685,7 @@ spec:
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.13.0@sha256:4b58d5b33e53378355f6e8ceb525ccf938b7b6f5384b35373f1f46787467ebf5"
image: "quay.io/cilium/operator-generic:v1.16.3@sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -1014,6 +1709,11 @@ spec:
key: debug
name: cilium-config
optional: true
ports:
- name: prometheus
containerPort: 9963
hostPort: 9963
protocol: TCP
livenessProbe:
httpGet:
host: "127.0.0.1"
@@ -1023,6 +1723,16 @@ spec:
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 5
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
@@ -1031,8 +1741,8 @@ spec:
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccount: "cilium-operator"
serviceAccountName: "cilium-operator"
automountServiceAccountToken: true
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
@@ -1051,6 +1761,3 @@ spec:
- name: cilium-config-path
configMap:
name: cilium-config
---
# Source: cilium/templates/cilium-secrets-namespace.yaml
# Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled.
@@ -8,4 +8,4 @@ hubble:
ipam:
operator:
# Set the appropriate pods subnet
clusterPoolIPv4PodCIDR: "{{ kube_pods_subnet }}"
clusterPoolIPv4PodCIDRList: ["{{ kube_pods_subnet }}"]
@@ -4,6 +4,7 @@ cloud_image: debian-11
mode: default
# Kubespray settings
kube_owner: root
kube_network_plugin: custom_cni
custom_cni_manifests:
- "{{ playbook_dir }}/../tests/files/custom_cni/cilium.yaml"
@@ -11,7 +11,7 @@ custom_cni_chart_release_name: cilium
custom_cni_chart_repository_name: cilium
custom_cni_chart_repository_url: https://helm.cilium.io
custom_cni_chart_ref: cilium/cilium
custom_cni_chart_version: 1.14.3
custom_cni_chart_version: 1.16.3
custom_cni_chart_values:
cluster:
name: kubespray
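For readability, the Helm-based scenario settings after the version bump can be summarized as follows; this is a sketch assembled from the last hunk above, not a complete scenario file:

# debian12-custom-cni-helm chart settings (sketch, assembled from the diff above)
custom_cni_chart_release_name: cilium
custom_cni_chart_repository_name: cilium
custom_cni_chart_repository_url: https://helm.cilium.io
custom_cni_chart_ref: cilium/cilium
custom_cni_chart_version: 1.16.3
custom_cni_chart_values:
  cluster:
    name: kubespray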