Cilium - Basic Cilium Installation and Communication Check (2)

Migration to Cilium

Many Kubernetes clusters today still run traditional CNI (Container Network Interface) plugins such as Flannel or Calico.

With eBPF-based high-performance networking gaining traction, however, migrating the CNI plugin to Cilium is increasingly under consideration. What follows is a simple demo of migrating from Flannel, and then from Calico, to Cilium.

Flannel to Cilium

Inspecting the existing Flannel cluster

(|HomeLab:N/A) root@k8s-ctr:~# helm list -A
NAME   	NAMESPACE   	REVISION	UPDATED                                	STATUS  	CHART          	APP VERSION
flannel	kube-flannel	1       	2025-07-19 19:42:16.187290147 +0900 KST	deployed	flannel-v0.27.1	v0.27.1

(|HomeLab:N/A) root@k8s-ctr:~# tree /opt/cni/bin/ | grep flannel
├── flannel

(|HomeLab:N/A) root@k8s-ctr:~# tree /etc/cni/net.d/
/etc/cni/net.d/
└── 10-flannel.conflist

(|HomeLab:N/A) root@k8s-ctr:~# cat /etc/cni/net.d/10-flannel.conflist | jq
{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}

(|HomeLab:N/A) root@k8s-ctr:~# kc describe cm -n kube-flannel kube-flannel-cfg
...
Data
====
net-conf.json:
----
{
  "Network": "10.244.0.0/16",
  "Backend": {
    "Type": "vxlan"
  }
}

(|HomeLab:N/A) root@k8s-ctr:~# ip -c route | grep 10.244.
10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1
10.244.1.0/24 via 10.244.1.0 dev flannel.1 onlink
10.244.3.0/24 via 10.244.3.0 dev flannel.1 onlink

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get nodes
NAME      STATUS   ROLES           AGE     VERSION
k8s-ctr   Ready    control-plane   3h41m   v1.33.2
k8s-w1    Ready    <none>          3h40m   v1.33.2
k8s-w2    Ready    <none>          3h39m   v1.33.2

(|HomeLab:N/A) root@k8s-ctr:~# brctl show
bridge name	bridge id		STP enabled	interfaces
cni0		8000.5e70c06d50fe	no		veth10a6853e
							veth75d3a172
							vethbf10bd3c

(|HomeLab:N/A) root@k8s-ctr:~# iptables -t nat -S | wc -l
77

(|HomeLab:N/A) root@k8s-ctr:~# iptables -t filter -S | wc -l
30

(|HomeLab:N/A) root@k8s-ctr:~# iptables -t nat -S > nat_flannel
(|HomeLab:N/A) root@k8s-ctr:~# iptables -t filter -S > filter_flannel

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pods -o wide -A
NAMESPACE      NAME                              READY   STATUS    RESTARTS   AGE     IP               NODE      NOMINATED NODE   READINESS GATES
default        curl-pod                          1/1     Running   0          91m     10.244.0.4       k8s-ctr   <none>           <none>
default        webpod-697b545f57-45wbp           1/1     Running   0          91m     10.244.1.2       k8s-w1    <none>           <none>
default        webpod-697b545f57-bvz97           1/1     Running   0          91m     10.244.3.2       k8s-w2    <none>           <none>
kube-flannel   kube-flannel-ds-6jj2z             1/1     Running   0          93m     192.168.10.102   k8s-w2    <none>           <none>
kube-flannel   kube-flannel-ds-8lnj8             1/1     Running   0          93m     192.168.10.101   k8s-w1    <none>           <none>
kube-flannel   kube-flannel-ds-l69mp             1/1     Running   0          93m     192.168.10.100   k8s-ctr   <none>           <none>
kube-system    coredns-674b8bbfcf-sk67g          1/1     Running   0          3h41m   10.244.0.2       k8s-ctr   <none>           <none>
kube-system    coredns-674b8bbfcf-xlw52          1/1     Running   0          3h41m   10.244.0.3       k8s-ctr   <none>           <none>
kube-system    etcd-k8s-ctr                      1/1     Running   0          3h41m   192.168.10.100   k8s-ctr   <none>           <none>
kube-system    kube-apiserver-k8s-ctr            1/1     Running   0          3h41m   192.168.10.100   k8s-ctr   <none>           <none>
kube-system    kube-controller-manager-k8s-ctr   1/1     Running   0          3h41m   192.168.10.100   k8s-ctr   <none>           <none>
kube-system    kube-proxy-dn95s                  1/1     Running   0          3h41m   192.168.10.100   k8s-ctr   <none>           <none>
kube-system    kube-proxy-kr4xv                  1/1     Running   0          3h40m   192.168.10.101   k8s-w1    <none>           <none>
kube-system    kube-proxy-ldx7j                  1/1     Running   0          3h39m   192.168.10.102   k8s-w2    <none>           <none>
kube-system    kube-scheduler-k8s-ctr            1/1     Running   0          3h41m   192.168.10.100   k8s-ctr   <none>           <none>

Removing the existing Flannel CNI

# helm uninstall -n kube-flannel flannel
# helm list -A

# kubectl get all -n kube-flannel
# kubectl delete ns kube-flannel

# kubectl get pod -A -owide

Remove the virtual NICs
# ip link del flannel.1
# ip link del cni0
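
On each node, the stale Flannel CNI config should also be cleaned up so kubelet does not keep pointing at the removed plugin. A minimal sketch, assuming the default paths seen during the inspection above:

# rm -f /etc/cni/net.d/10-flannel.conflist
# rm -f /run/flannel/subnet.env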

Verify removal
(|HomeLab:N/A) root@k8s-ctr:~# ip -c link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 08:00:27:71:19:d8 brd ff:ff:ff:ff:ff:ff
    altname enp0s8
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
    link/ether 08:00:27:d8:a8:88 brd ff:ff:ff:ff:ff:ff
    altname enp0s9
6: veth75d3a172@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP mode DEFAULT group default qlen 1000
    link/ether 5e:94:82:fd:f5:19 brd ff:ff:ff:ff:ff:ff link-netns cni-51adc222-7922-b776-8c89-11b2530104a7
7: vethbf10bd3c@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP mode DEFAULT group default qlen 1000
    link/ether 0e:f0:10:d4:33:65 brd ff:ff:ff:ff:ff:ff link-netns cni-ad9cc013-1136-0f8d-3bdc-8335414f68b8
8: veth10a6853e@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP mode DEFAULT group default qlen 1000
    link/ether aa:22:ce:ca:55:5d brd ff:ff:ff:ff:ff:ff link-netns cni-70f28269-5810-35aa-459d-42d545727521

(|HomeLab:N/A) root@k8s-ctr:~# brctl show

(|HomeLab:N/A) root@k8s-ctr:~# ip -c route
default via 10.0.2.2 dev eth0 proto dhcp src 10.0.2.15 metric 100
10.0.2.0/24 dev eth0 proto kernel scope link src 10.0.2.15 metric 100
10.0.2.2 dev eth0 proto dhcp scope link src 10.0.2.15 metric 100
10.0.2.3 dev eth0 proto dhcp scope link src 10.0.2.15 metric 100
192.168.10.0/24 dev eth1 proto kernel scope link src 192.168.10.100

Removing kube-proxy and checking per-node pod IPAM (pod CIDR)

When kube-controller-manager runs with --allocate-node-cidrs=true, it automatically assigns each node a pod CIDR carved out of the range given by the --cluster-cidr flag.
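
On a kubeadm-based cluster like this one, those flags can also be read straight from the static pod manifest; a quick check, assuming the default manifest path:

# grep -E 'allocate-node-cidrs|cluster-cidr' /etc/kubernetes/manifests/kube-controller-manager.yaml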

# kubectl -n kube-system delete ds kube-proxy
# kubectl -n kube-system delete cm kube-proxy

# iptables-save | grep -v KUBE | grep -v FLANNEL | iptables-restore
# iptables-save

Verify removal
(|HomeLab:N/A) root@k8s-ctr:~# kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'
k8s-ctr	10.244.0.0/24
k8s-w1	10.244.1.0/24
k8s-w2	10.244.3.0/24

(|HomeLab:N/A) root@k8s-ctr:~# kc describe pod -n kube-system kube-controller-manager-k8s-ctr | grep -e cidr -e 'service-cluster-ip-range'
      --allocate-node-cidrs=true
      --cluster-cidr=10.244.0.0/16
      --service-cluster-ip-range=10.96.0.0/16

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pod -o wide
NAME                      READY   STATUS    RESTARTS   AGE    IP           NODE      NOMINATED NODE   READINESS GATES
curl-pod                  1/1     Running   0          105m   10.244.0.4   k8s-ctr   <none>           <none>
webpod-697b545f57-45wbp   1/1     Running   0          105m   10.244.1.2   k8s-w1    <none>           <none>
webpod-697b545f57-bvz97   1/1     Running   0          105m   10.244.3.2   k8s-w2    <none>           <none>

Installing Cilium

# helm repo add cilium https://helm.cilium.io/

# helm install cilium cilium/cilium --version 1.17.5 --namespace kube-system \
--set k8sServiceHost=192.168.10.100 --set k8sServicePort=6443 \
--set kubeProxyReplacement=true \
--set routingMode=native \
--set autoDirectNodeRoutes=true \
--set ipam.mode="cluster-pool" \
--set ipam.operator.clusterPoolIPv4PodCIDRList={"172.20.0.0/16"} \
--set ipv4NativeRoutingCIDR=172.20.0.0/16 \
--set endpointRoutes.enabled=true \
--set installNoConntrackIptablesRules=true \
--set bpf.masquerade=true \
--set ipv6.enabled=false
Option descriptions:

- kubeProxyReplacement=true: replaces kube-proxy entirely, with Cilium performing its functions directly.
- routingMode=native: sets Cilium's routing mode to native (uses Linux kernel native routing).
- autoDirectNodeRoutes=true: automatically installs direct routes for traffic between nodes.
- ipam.mode="cluster-pool": sets the IP allocation mode to cluster pool (Cilium manages IP allocation itself).
- ipam.operator.clusterPoolIPv4PodCIDRList={"172.20.0.0/16"}: sets the IPv4 CIDR pool used for pod addresses across the cluster.
- ipv4NativeRoutingCIDR=172.20.0.0/16: sets the IPv4 CIDR range treated as natively routable (no masquerading within it).
- endpointRoutes.enabled=true: creates a separate route for each pod, optimizing network paths.
- installNoConntrackIptablesRules=true: installs iptables NOTRACK rules so pod traffic bypasses conntrack, leaving connection tracking to the eBPF datapath.
- bpf.masquerade=true: performs IP masquerading (SNAT) in eBPF instead of iptables.
- ipv6.enabled=false: disables IPv6 in the cluster.
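
Whether these settings actually took effect can be spot-checked from the agent itself; a sketch (the exact status fields vary slightly across Cilium versions):

# kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium-dbg status | grep -E 'KubeProxyReplacement|Routing|Masquerading|IPAM'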

Verify installation

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get ciliumnodes -o json | grep podCIDRs -A2
                    "podCIDRs": [
                        "172.20.0.0/24"
                    ],
--
                    "podCIDRs": [
                        "172.20.1.0/24"
                    ],
--
                    "podCIDRs": [
                        "172.20.2.0/24"
                    ],

Check pod IPs after redeploying the pods
(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pod -owide
NAME                      READY   STATUS    RESTARTS   AGE   IP             NODE      NOMINATED NODE   READINESS GATES
curl-pod                  1/1     Running   0          33s   172.20.0.150   k8s-ctr   <none>           <none>
webpod-85ccc4b7dd-qphtq   1/1     Running   0          74s   172.20.2.150   k8s-w2    <none>           <none>
webpod-85ccc4b7dd-tqc9r   1/1     Running   0          71s   172.20.1.165   k8s-w1    <none>           <none>

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get ciliumendpoints
NAME                      SECURITY IDENTITY   ENDPOINT STATE   IPV4           IPV6
curl-pod                  16024               ready            172.20.0.150
webpod-85ccc4b7dd-qphtq   47031               ready            172.20.2.150
webpod-85ccc4b7dd-tqc9r   47031               ready            172.20.1.165

(|HomeLab:N/A) root@k8s-ctr:~# kubectl exec -it -n kube-system ds/cilium -c cilium-agent -- cilium-dbg endpoint list
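
Pod-to-pod and Service traffic can then be verified from curl-pod; a sketch, assuming the webpod Service from the earlier setup is still in place (repeating the call should alternate between the two webpod replicas):

# kubectl exec -it curl-pod -- curl -s webpod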

Calico to Cilium

Installing Calico

# helm repo add projectcalico https://docs.tigera.io/calico/charts
# helm repo update
# helm install calico projectcalico/tigera-operator \
  --namespace tigera-operator \
  --create-namespace

cat <<EOF > installation.yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLAN
      natOutgoing: Enabled
      nodeSelector: all()
EOF

kubectl apply -f installation.yaml
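
The tigera-operator then reconciles this Installation resource; the rollout can be watched through the TigeraStatus objects it creates (a sketch):

# kubectl get tigerastatus
# kubectl get pods -n calico-system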

Inspecting the existing Calico cluster

(|HomeLab:N/A) root@k8s-ctr:~# helm list -A
NAME  	NAMESPACE      	REVISION	UPDATED                                	STATUS  	CHART                  	APP VERSION
calico	tigera-operator	1       	2025-07-19 22:45:44.341876006 +0900 KST	deployed	tigera-operator-v3.30.2	v3.30.2

(|HomeLab:N/A) root@k8s-ctr:~# tree /opt/cni/bin/ | grep calico
├── calico
├── calico-ipam

(|HomeLab:N/A) root@k8s-ctr:~# tree /etc/cni/net.d/
/etc/cni/net.d/
├── 10-calico.conflist
└── calico-kubeconfig

(|HomeLab:N/A) root@k8s-ctr:~# cat /etc/cni/net.d/10-calico.conflist | jq
{
  "name": "k8s-pod-network",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "container_settings": {
        "allow_ip_forwarding": false
      },
      "datastore_type": "kubernetes",
      "endpoint_status_dir": "/var/run/calico/endpoint-status",
      "ipam": {
        "assign_ipv4": "true",
        "assign_ipv6": "false",
        "type": "calico-ipam"
      },
      "kubernetes": {
        "k8s_api_root": "https://10.96.0.1:443",
        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
      },
      "log_file_max_age": 30,
      "log_file_max_count": 10,
      "log_file_max_size": 100,
      "log_file_path": "/var/log/calico/cni/cni.log",
      "log_level": "Info",
      "mtu": 0,
      "nodename_file_optional": false,
      "policy": {
        "type": "k8s"
      },
      "policy_setup_timeout_seconds": 0,
      "type": "calico"
    },
    {
      "capabilities": {
        "portMappings": true
      },
      "snat": true,
      "type": "portmap"
    }
  ]
}

(|HomeLab:N/A) root@k8s-ctr:~# ip -c route | grep 10.244.
10.244.46.0/26 via 10.244.46.8 dev vxlan.calico onlink
10.244.46.8 dev vxlan.calico scope link
blackhole 10.244.78.64/26 proto 80
10.244.78.65 dev cali4aab08373ba scope link
10.244.78.66 dev cali33014ef5b3d scope link
10.244.228.64/26 via 10.244.228.66 dev vxlan.calico onlink
10.244.228.66 dev vxlan.calico scope link

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pod -o wide -A | grep 10.244 | grep ctr
calico-system      csi-node-driver-mvzvw                      2/2     Running   0          12m   10.244.78.65     k8s-ctr   <none>           <none>
calico-system      whisker-7f5b7c657b-975qr                   2/2     Running   0          12m   10.244.78.66     k8s-ctr   <none>           <none>

(|HomeLab:N/A) root@k8s-ctr:~# ip link show vxlan.calico
9: vxlan.calico: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
    link/ether 66:65:25:b2:65:0a brd ff:ff:ff:ff:ff:ff

(|HomeLab:N/A) root@k8s-ctr:~# ip link show | grep cali
4: cali4aab08373ba@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
5: cali33014ef5b3d@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1480 qdisc noqueue state UP mode DEFAULT group default qlen 1000
9: vxlan.calico: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000

Deploy sample Pods and a Service
(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pods -o wide
NAME                      READY   STATUS    RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
curl-pod                  1/1     Running   0          10s   10.244.78.68    k8s-ctr   <none>           <none>
webpod-697b545f57-fbmmm   1/1     Running   0          10s   10.244.228.67   k8s-w1    <none>           <none>
webpod-697b545f57-fg6bg   1/1     Running   0          10s   10.244.46.9     k8s-w2    <none>           <none>

Removing the existing Calico CNI

# helm uninstall -n tigera-operator calico
# helm list -A

# kubectl get all -n calico-system
# kubectl delete ns tigera-operator

# ip link del vxlan.calico
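
As with Flannel, the leftover Calico CNI files should be removed on each node; a minimal sketch, assuming the paths found during inspection:

# rm -f /etc/cni/net.d/10-calico.conflist /etc/cni/net.d/calico-kubeconfig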

Removing kube-proxy and checking per-node pod IPAM (pod CIDR)

# kubectl -n kube-system delete ds kube-proxy
# kubectl -n kube-system delete cm kube-proxy

# iptables-save | grep -v KUBE | grep -vi cali | iptables-restore
# iptables-save

# reboot

Verify removal
(|HomeLab:N/A) root@k8s-ctr:~# kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'
k8s-ctr	10.244.0.0/24
k8s-w1	10.244.1.0/24
k8s-w2	10.244.2.0/24

(|HomeLab:N/A) root@k8s-ctr:~# kc describe pod -n kube-system kube-controller-manager-k8s-ctr | grep -e cidr -e 'service-cluster-ip-range'
      --allocate-node-cidrs=true
      --cluster-cidr=10.244.0.0/16
      --service-cluster-ip-range=10.96.0.0/16

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pod -o wide
NAME                      READY   STATUS    RESTARTS   AGE   IP              NODE      NOMINATED NODE   READINESS GATES
curl-pod                  1/1     Running   0          21m   10.244.78.68    k8s-ctr   <none>           <none>
webpod-697b545f57-fbmmm   1/1     Running   0          21m   10.244.228.67   k8s-w1    <none>           <none>
webpod-697b545f57-fg6bg   1/1     Running   0          21m   10.244.46.9     k8s-w2    <none>           <none>

Installing Cilium

The procedure is identical to the Flannel => Cilium steps above, so it is omitted here.

Verify installation

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get ciliumnodes -o json | grep podCIDRs -A2
                    "podCIDRs": [
                        "172.20.0.0/24"
                    ],
--
                    "podCIDRs": [
                        "172.20.2.0/24"
                    ],
--
                    "podCIDRs": [
                        "172.20.1.0/24"
                    ],

Check pod IPs after redeploying the pods
(|HomeLab:N/A) root@k8s-ctr:~# kubectl get pod -owide
NAME                      READY   STATUS    RESTARTS   AGE   IP             NODE      NOMINATED NODE   READINESS GATES
curl-pod                  1/1     Running   0          20s   172.20.0.6     k8s-ctr   <none>           <none>
webpod-659cd747f8-v9xkm   1/1     Running   0          64s   172.20.1.172   k8s-w2    <none>           <none>
webpod-659cd747f8-zqbk8   1/1     Running   0          67s   172.20.2.151   k8s-w1    <none>           <none>

(|HomeLab:N/A) root@k8s-ctr:~# kubectl get ciliumendpoints
NAME                      SECURITY IDENTITY   ENDPOINT STATE   IPV4           IPV6
curl-pod                  61805               ready            172.20.0.6
webpod-659cd747f8-v9xkm   11362               ready            172.20.1.172
webpod-659cd747f8-zqbk8   11362               ready            172.20.2.151

(|HomeLab:N/A) root@k8s-ctr:~# kubectl exec -it -n kube-system ds/cilium -c cilium-agent -- cilium-dbg endpoint list
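
With kube-proxy gone, Service load-balancing should now be handled entirely by Cilium's eBPF datapath; this can be spot-checked from the agent (a sketch):

# kubectl exec -it -n kube-system ds/cilium -c cilium-agent -- cilium-dbg service list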
