On the Control Plane..
sudo apt install nfs-kernel-server -y # install the NFS server
sudo mkdir /nfsvolume # create the NFS export directory
echo "<h1> Hello NFS Volume </h1>" | sudo tee /nfsvolume/index.html # create an index.html file
sudo chown -R www-data:www-data /nfsvolume # change the owner and group of the NFS directory
sudo vi /etc/exports
/nfsvolume 192.168.100.0/24(rw,sync,no_subtree_check,no_root_squash)
### Edit the file to add this line
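The restart below will pick up the change, but the export table can also be applied and inspected right away (an optional check using the standard exportfs tool):
sudo exportfs -arv # re-export everything listed in /etc/exports and print what was exported
sudo exportfs -v # list the currently active exports and their options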
ansible all -i ~/kubespray/inventory/mycluster/inventory.ini -m apt -a 'name=nfs-common' -b
### Install the NFS client package (nfs-common) on the other nodes as well.
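To confirm the package actually landed on every node, an ad-hoc check along these lines should work (a sketch, using the same inventory file as above):
ansible all -i ~/kubespray/inventory/mycluster/inventory.ini -m command -a 'dpkg -s nfs-common'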
sudo systemctl restart nfs-kernel-server
systemctl status nfs-kernel-server
Seeing 'active (exited)' here is normal.
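This is because the unit is a oneshot service: it exports the shares and exits, then stays 'active (exited)'. Assuming the stock Ubuntu unit file, this can be confirmed with:
systemctl show nfs-kernel-server -p Type,RemainAfterExit # expect Type=oneshot, RemainAfterExit=yes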
cd ~
mkdir nfs
cd nfs
vi mypv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1G
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /nfsvolume
    server: 192.168.100.100
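Before creating the PV, it is worth checking from a worker node that the export is actually reachable (showmount comes with the nfs-common package installed earlier):
showmount -e 192.168.100.100 # should list /nfsvolume 192.168.100.0/24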
vi mypvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1G
  storageClassName: '' # For Static Provisioning
  volumeName: mypv
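### Setting storageClassName to an empty string stops the default StorageClass from dynamically provisioning a volume for this claim, and volumeName pins the claim to mypv, so the claim binds statically to the PV defined above.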
vi myweb-rs.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myweb-rs
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: myweb
        image: httpd
        volumeMounts:
        - name: myvol
          mountPath: /usr/local/apache2/htdocs
      volumes:
      - name: myvol
        persistentVolumeClaim:
          claimName: mypvc
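### /usr/local/apache2/htdocs is the DocumentRoot of the httpd image, so all three replicas serve the same index.html from the shared NFS volume; this is exactly what the ReadWriteMany access mode allows.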
kubectl create -f .
kubectl get rs,po,pv,pvc
In the CLAIM column you can see that the PVC is bound to the PV.
The PVC's STATUS has also changed to Bound.
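Since every replica serves the same NFS-backed htdocs, any pod should return the file. A quick check from the Control Plane (the pod IP below is illustrative):
kubectl get po -o wide # note one of the pod IPs
curl http://<POD-IP>/ # expected output: <h1> Hello NFS Volume </h1>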
To verify the share itself, you can mount it manually on a worker node (knode2 here):
vagrant@knode2:~$ sudo mount -t nfs 192.168.100.100:/nfsvolume /mnt
vagrant@knode2:~$ ls /
bin etc lib32 lost+found opt run srv usr
boot home lib64 media proc sbin sys vagrant
dev lib libx32 mnt root snap tmp var
vagrant@knode2:~$ ls /mnt
index.html
vagrant@knode2:~$ cat /mnt/index.html
<h1> Hello NFS Volume </h1>
vagrant@knode2:~$
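The manual mount was only for verification; unmount it afterwards:
sudo umount /mnt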
Back on the Control Plane..
kubectl delete -f .
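Note that kubectl delete -f . removes the PV object along with the PVC and ReplicaSet, but because of persistentVolumeReclaimPolicy: Retain the files on the NFS server are left untouched; had only the PVC been deleted, the PV itself would have remained in the Released state. A quick check on the Control Plane:
ls /nfsvolume # index.html is still there after the delete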