[!Tip]
Deploying NFS for k8s
Please credit the source when reposting: https://janrs.com
AlmaLinux version: 8.6
nfs version: nfs-utils-1.3.0
rpcbind version: 0.2.0
k8s version: 1.23.9
Deploying NFS on k8s
[!NOTE]
This is done in two parts:
- Install the NFS packages on every node (both masters and workers).
- Deploy the NFS Provisioner, which provides dynamic volume provisioning.
Install NFS
Install it on every node and enable the services at boot:
dnf install nfs-utils -y && \
systemctl enable rpcbind && \
systemctl enable nfs-utils && \
systemctl enable nfs-server
systemctl start rpcbind && \
systemctl start nfs-utils && \
systemctl start nfs-server
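Optionally, confirm the services are running before moving on; a quick check:
# Optional status check for the NFS-related services
systemctl is-active rpcbind nfs-server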
Install NFS on Debian 10:
apt install nfs-kernel-server -y
The following steps only need to be performed on the machine that provides the NFS service.
Configure NFS
Modify the kube-apiserver parameters
In k8s 1.23.9 the following feature gate needs to be set; restart kube-apiserver after adding it. If it is already configured, skip this step.
--feature-gates=RemoveSelfLink=false
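For reference, on a kubeadm-based install (an assumption; adjust for other setups) the flag goes into the kube-apiserver static pod manifest, and the kubelet restarts the apiserver automatically once the file is saved. A minimal sketch of the relevant fragment:
# /etc/kubernetes/manifests/kube-apiserver.yaml (kubeadm layout; fragment only)
spec:
  containers:
  - command:
    - kube-apiserver
    - --feature-gates=RemoveSelfLink=false  # append to the existing flag list
    # ...leave all other existing flags unchanged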
Set up the server
Create the shared directory:
mkdir -p /nfs/data && \
chmod -R 777 /nfs/data/
Edit the default NFS configuration file:
cat > /etc/exports <<EOF
/nfs/data *(rw,no_root_squash,sync)
EOF
# Apply the configuration
exportfs -r && \
# Verify that it took effect
exportfs
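The * in the export line allows any host to mount the share. If you want to restrict access, /etc/exports also accepts a subnet; a sketch (the 172.16.222.0/24 network is only an example, substitute your own):
# Restrict the export to one subnet instead of "*"
cat > /etc/exports <<EOF
/nfs/data 172.16.222.0/24(rw,no_root_squash,sync)
EOF
exportfs -r && exportfs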
Restart the services
systemctl restart rpcbind && \
systemctl restart nfs-utils && \
systemctl restart nfs-server
Check the RPC service registrations:
rpcinfo -p localhost
Output:
program vers proto port service
100000 4 tcp 111 portmapper
100000 3 tcp 111 portmapper
100000 2 tcp 111 portmapper
100000 4 udp 111 portmapper
100000 3 udp 111 portmapper
100000 2 udp 111 portmapper
100024 1 udp 47474 status
100024 1 tcp 55313 status
100005 1 udp 20048 mountd
100005 1 tcp 20048 mountd
100005 2 udp 20048 mountd
100005 2 tcp 20048 mountd
100005 3 udp 20048 mountd
100005 3 tcp 20048 mountd
100003 3 tcp 2049 nfs
100003 4 tcp 2049 nfs
100227 3 tcp 2049 nfs_acl
100003 3 udp 2049 nfs
100227 3 udp 2049 nfs_acl
100021 1 udp 44686 nlockmgr
100021 3 udp 44686 nlockmgr
100021 4 udp 44686 nlockmgr
100021 1 tcp 36083 nlockmgr
100021 3 tcp 36083 nlockmgr
100021 4 tcp 36083 nlockmgr
Test with showmount
showmount -e ${LOCALHOST_IP_ADDR}
The following output indicates the installation succeeded:
Export list for 172.16.222.121:
/nfs/data *
Set up the clients
Every client must have the NFS client installed, otherwise pods cannot be deployed properly.
Install
On AlmaLinux, install with:
dnf install nfs-utils nfs4-acl-tools -y
On Debian, install with:
apt install nfs-common -y
Verify
showmount -e 172.16.222.232
Create the same directory locally as on the server:
mkdir -p /nfs/data/
Mount it:
mount -t nfs 172.16.222.232:/nfs/data/ /nfs/data/
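To confirm the share is mounted, and optionally make it survive a reboot (the fstab line is an extra step, not part of the original guide):
# Confirm the NFS share is mounted
df -h /nfs/data
# Optional: persist the mount across reboots
echo "172.16.222.232:/nfs/data /nfs/data nfs defaults,_netdev 0 0" >> /etc/fstab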
Deploy the NFS Provisioner
Create the namespace
kubectl create ns nfs
Create the RBAC permissions
cat > /home/nfs-rbac.yaml <<EOF
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs # Replace with the namespace where the NFS Provisioner is deployed
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs # Replace with the namespace where the NFS Provisioner is deployed
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f /home/nfs-rbac.yaml -n nfs
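A quick sanity check that the RBAC objects exist (optional):
kubectl -n nfs get serviceaccount nfs-client-provisioner
kubectl get clusterrole nfs-client-provisioner-runner
kubectl get clusterrolebinding run-nfs-client-provisioner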
Deploy the NFS Provisioner
cat > /home/nfs-dp.yaml <<EOF
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate # Update strategy: delete then recreate (the default is RollingUpdate)
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      imagePullSecrets: # I use my own Aliyun image registry here
        - name: aliimagesecret # The pull secret for that registry
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-shenzhen.aliyuncs.com/yjy_k8s/nfs-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-client # Name of the nfs-provisioner; the StorageClass created later must match it
            - name: NFS_SERVER
              value: 172.16.106.205 # NFS server address; must match the volumes section below
            - name: NFS_PATH
              value: /nfs/data # NFS server directory; must match the volumes section below
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.16.106.205 # NFS server address
            path: /nfs/data # NFS server directory
EOF
kubectl apply -f /home/nfs-dp.yaml -n nfs
Check:
kubectl get pods -n nfs
Output:
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-767b54667b-q47cm 1/1 Running 0 4s
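If the pod does not reach Running, the provisioner logs usually show the cause (for example an unreachable NFS server or a wrong export path):
kubectl -n nfs logs deploy/nfs-client-provisioner
kubectl -n nfs describe pod -l app=nfs-client-provisioner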
Create the StorageClass
cat > /home/nfs-sc.yaml <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # Make this the default StorageClass
provisioner: nfs-client # Name of the dynamic provisioner; must match PROVISIONER_NAME in the Deployment above
parameters:
  archiveOnDelete: "true" # "false" discards the data when a PVC is deleted; "true" keeps (archives) it
EOF
kubectl apply -f /home/nfs-sc.yaml -n nfs
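To verify dynamic provisioning end to end, you can create a small test PVC against the new StorageClass (the name test-claim and the 1Mi size are arbitrary examples); it should become Bound within a few seconds:
cat > /home/nfs-test-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
kubectl apply -f /home/nfs-test-pvc.yaml -n nfs && \
kubectl get pvc test-claim -n nfs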