Deploying a Nacos Cluster on Kubernetes
- 1. Introduction to Nacos
- 1.1 What is Nacos
- 1.2 Key features
- 1.3 Use cases
- 2. Deploying a Nacos cluster
- 2.1 NFS-backed dynamic volume provisioning for Kubernetes
- 2.2 Nacos cluster
1. Introduction to Nacos
1.1 What is Nacos
Nacos (short for "Dynamic Naming and Configuration Service") is an open-source platform for dynamic service discovery, configuration management, and service management, used mainly for service governance in microservice architectures. It supports service registration and discovery, dynamic configuration management, and traffic management, making it easier to build and operate distributed applications.
1.2 Key features
- Service registration and discovery: services register themselves with Nacos on startup and can discover other services at runtime, simplifying communication between microservices (see the curl sketch after this list).
- Dynamic configuration management: centralized configuration with real-time push of changes, plus gray release and version control.
- Dynamic DNS and load balancing: DNS-based service discovery with built-in load balancing, so traffic is spread across healthy instances.
- Data persistence: data can be stored in relational or distributed databases to guarantee consistency and high availability.
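As a quick illustration of the first two features, the sketch below registers a service instance and publishes a configuration item through the Nacos Open API. It assumes a server reachable at 127.0.0.1:8848 with authentication disabled; the service name, instance address, and dataId are made-up examples.
# Register a service instance
curl -X POST 'http://127.0.0.1:8848/nacos/v1/ns/instance?serviceName=example.service&ip=10.0.0.12&port=8080'
# Publish a configuration item, then read it back
curl -X POST 'http://127.0.0.1:8848/nacos/v1/cs/configs?dataId=example.properties&group=DEFAULT_GROUP&content=useLocalCache=true'
curl 'http://127.0.0.1:8848/nacos/v1/cs/configs?dataId=example.properties&group=DEFAULT_GROUP'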
1.3 Use cases
- Service discovery and governance in microservice architectures.
- Configuration management in distributed systems.
- Real-time configuration updates and push, reducing the risk of interruptions caused by changes.
Compared with ZooKeeper, Nacos is a better fit for microservice environments and covers more ground (configuration management in addition to service discovery).
Compared with Consul, Nacos offers stronger dynamic configuration management and adapts more flexibly to complex configuration requirements.
2. Deploying a Nacos cluster
2.1 NFS-backed dynamic volume provisioning for Kubernetes
- NFS server (172.2.10.5)
# System prep: disable the firewall and SELinux
systemctl disable firewalld
systemctl stop firewalld
getenforce   # check the current SELinux mode
setenforce 0
sed -i "s#enforcing#disabled#g" /etc/selinux/config
# Install the NFS utilities
dnf install -y nfs-utils
# Configure the export
mkdir /nfs-share
cat > /etc/exports << EOF
/nfs-share *(rw,sync,no_root_squash)
EOF
systemctl enable nfs-server
systemctl start nfs-server
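The export can be verified on the server before moving to the clients:
exportfs -v   # should list /nfs-share with the options set in /etc/exports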
- NFS client (each Kubernetes worker node)
dnf install -y nfs-utils
showmount -e 172.2.10.5   # list the directories exported by the NFS server
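Optionally, mount the export by hand from one worker node to rule out network or permission problems before the provisioner depends on it (the mount point /mnt/nfs-test is arbitrary):
mkdir -p /mnt/nfs-test
mount -t nfs 172.2.10.5:/nfs-share /mnt/nfs-test
touch /mnt/nfs-test/.write-test && rm /mnt/nfs-test/.write-test
umount /mnt/nfs-test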
- Deploy nfs-client-provisioner
kubectl create ns nfs-client-provisioner
cat > nfs-client-provisioner.yaml << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: nfs-client-provisioner
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: nfs-client-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: nfs-client-provisioner
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: nfs-client-provisioner
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
namespace: nfs-client-provisioner
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
      serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: eipwork/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: fuseim.pri/ifs
- name: NFS_SERVER
value: 172.2.10.5
- name: NFS_PATH
value: /nfs-share
volumes:
- name: nfs-client-root
nfs:
server: 172.2.10.5
path: /nfs-share
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: managed-nfs-storage
provisioner: fuseim.pri/ifs
parameters:
archiveOnDelete: "false"
EOF
kubectl apply -f nfs-client-provisioner.yaml
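To confirm that dynamic provisioning works before deploying anything stateful, create a throwaway PVC against the new StorageClass (the name test-nfs-pvc is arbitrary):
cat > test-nfs-pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-nfs-pvc
spec:
  accessModes: ["ReadWriteMany"]
  storageClassName: managed-nfs-storage
  resources:
    requests:
      storage: 1Gi
EOF
kubectl apply -f test-nfs-pvc.yaml
kubectl get pvc test-nfs-pvc   # STATUS should become Bound and a matching PV should appear
kubectl delete -f test-nfs-pvc.yaml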
2.2 Nacos cluster
- Clone the nacos-k8s project
git clone https://github.com/nacos-group/nacos-k8s.git
- Standalone MySQL
cat > mysql-standalone.yaml << EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mysql-dynamic-pvc
namespace: nacos
annotations:
volume.beta.kubernetes.io/storage-provisioner: fuseim.pri/ifs
volume.kubernetes.io/storage-provisioner: fuseim.pri/ifs
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: managed-nfs-storage
volumeMode: Filesystem
---
kind: ConfigMap
apiVersion: v1
metadata:
name: mysql-conf
namespace: nacos
data:
my.cnf: |-
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
init_connect='SET collation_connection = utf8mb4_unicode_ci'
init_connect='SET NAMES utf8mb4'
character-set-server=utf8mb4
collation-server=utf8mb4_unicode_ci
skip-character-set-client-handshake
skip-name-resolve
---
kind: Secret
apiVersion: v1
metadata:
name: mysql-root-pwd
namespace: nacos
data:
MYSQL_ROOT_PASSWORD: MTIzNDU2
type: Opaque
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: nacos-mysql8
namespace: nacos
labels:
app: nacos-mysql8
spec:
  serviceName: nacos-mysql-headless
  replicas: 1
selector:
matchLabels:
app: nacos-mysql8
template:
metadata:
labels:
app: nacos-mysql8
spec:
volumes:
- name: host-time
hostPath:
path: /etc/localtime
- name: volume-3tup41
persistentVolumeClaim:
claimName: mysql-dynamic-pvc
- name: volume-x4lvm1
configMap:
name: mysql-conf
defaultMode: 420
containers:
- name: mysql-p1d1l2
image: 'mysql:8.0.36'
ports:
- name: tcp-0
containerPort: 3306
protocol: TCP
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-root-pwd
key: MYSQL_ROOT_PASSWORD
resources: {}
volumeMounts:
- name: host-time
readOnly: true
mountPath: /etc/localtime
- name: volume-3tup41
mountPath: /var/lib/mysql
- name: volume-x4lvm1
readOnly: true
mountPath: /etc/mysql/conf.d
---
# Headless Service referenced by the Nacos ConfigMap (mysql.host: nacos-mysql-headless)
apiVersion: v1
kind: Service
metadata:
  name: nacos-mysql-headless
  namespace: nacos
  labels:
    app: nacos-mysql8
spec:
  clusterIP: None
  ports:
    - name: mysql
      port: 3306
      targetPort: 3306
  selector:
    app: nacos-mysql8
EOF
# Deploy (the nacos namespace must exist before the manifests are applied)
kubectl create ns nacos
kubectl apply -f mysql-standalone.yaml
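Wait until the MySQL pod is Running and its PVC is Bound before initializing the schema:
kubectl get pods -n nacos -l app=nacos-mysql8
kubectl get pvc mysql-dynamic-pvc -n nacos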
# Create the Nacos database schema and the MySQL user
cd nacos-k8s/operator/config/sql/
kubectl cp nacos-mysql.sql nacos-mysql8-0:/root/nacos-mysql.sql -n nacos
kubectl exec -it nacos-mysql8-0 -n nacos -- /bin/sh
mysql -uroot -p'123456'
# If nacos-mysql.sql does not create the nacos database itself, create and select it first
mysql> CREATE DATABASE IF NOT EXISTS nacos DEFAULT CHARACTER SET utf8mb4;
mysql> use nacos;
mysql> source /root/nacos-mysql.sql;
# Recreate root with mysql_native_password so that older JDBC drivers can still authenticate
mysql> drop user 'root'@'%';
mysql> CREATE USER 'root'@'%' IDENTIFIED WITH 'mysql_native_password' BY '123456';
mysql> grant all privileges on *.* to root@'%';
mysql> flush privileges;
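A quick check that the import succeeded; the tables below are part of the standard Nacos schema and should show up in the nacos database:
mysql> use nacos;
mysql> show tables;   -- expect config_info, his_config_info, tenant_info, users, roles, ...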
- Nacos Cluster
cd nacos-k8s/deploy/nacos
cat > nacos-pvc-nfs.yaml << EOF
apiVersion: v1
kind: Service
metadata:
name: nacos-headless
namespace: nacos
labels:
app: nacos
spec:
publishNotReadyAddresses: true
ports:
- port: 8848
name: server
targetPort: 8848
- port: 9848
name: client-rpc
targetPort: 9848
- port: 9849
name: raft-rpc
targetPort: 9849
    ## Raft election port kept for compatibility with Nacos 1.4.x
- port: 7848
name: old-raft-rpc
targetPort: 7848
clusterIP: None
selector:
app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
name: nacos-cm
namespace: nacos
data:
mysql.host: "nacos-mysql-headless"
mysql.db.name: "nacos"
mysql.db.param: 'characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false&allowPublicKeyRetrieval=true'
mysql.port: "3306"
mysql.user: "root"
mysql.password: "123456"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nacos
namespace: nacos
spec:
podManagementPolicy: Parallel
serviceName: nacos-headless
replicas: 3
template:
metadata:
labels:
app: nacos
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- nacos
topologyKey: "kubernetes.io/hostname"
initContainers:
- name: peer-finder-plugin-install
image: ansonnz/nacos-peer-finder-plugins-arm64:1.1
imagePullPolicy: Always
volumeMounts:
- mountPath: /home/nacos/plugins/peer-finder
name: data
subPath: peer-finder
containers:
- name: nacos
imagePullPolicy: Always
image: nacos/nacos-server:v2.4.2
resources:
requests:
memory: "2Gi"
cpu: "500m"
ports:
- containerPort: 8848
name: client-port
- containerPort: 9848
name: client-rpc
- containerPort: 9849
name: raft-rpc
- containerPort: 7848
name: old-raft-rpc
env:
- name: NACOS_REPLICAS
value: "3"
- name: SERVICE_NAME
value: "nacos-headless"
- name: DOMAIN_NAME
value: "cluster.local"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: MYSQL_SERVICE_HOST
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.host
- name: MYSQL_SERVICE_DB_NAME
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.db.name
- name: MYSQL_SERVICE_PORT
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.port
- name: MYSQL_SERVICE_USER
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.user
- name: MYSQL_SERVICE_PASSWORD
valueFrom:
configMapKeyRef:
name: nacos-cm
key: mysql.password
- name: SPRING_DATASOURCE_PLATFORM
value: "mysql"
- name: NACOS_SERVER_PORT
value: "8848"
- name: NACOS_APPLICATION_PORT
value: "8848"
- name: PREFER_HOST_MODE
value: "hostname"
- name: NACOS_AUTH_IDENTITY_KEY
value: "example"
- name: NACOS_AUTH_IDENTITY_VALUE
value: "example"
            # NACOS_AUTH_TOKEN must be a Base64-encoded secret that decodes to at least 32 bytes
            # https://blog.csdn.net/weixin_51268477/article/details/143145509
            - name: NACOS_AUTH_TOKEN
              value: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- name: NACOS_AUTH_CACHE_ENABLE
value: "true"
- name: NACOS_AUTH_ENABLE
value: "true"
volumeMounts:
- name: data
mountPath: /home/nacos/plugins/peer-finder
subPath: peer-finder
- name: data
mountPath: /home/nacos/data
subPath: data
- name: data
mountPath: /home/nacos/logs
subPath: logs
volumeClaimTemplates:
- metadata:
name: data
annotations:
volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
accessModes: [ "ReadWriteMany" ]
resources:
requests:
storage: 20Gi
selector:
matchLabels:
app: nacos
EOF
kubectl apply -f nacos-pvc-nfs.yaml
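After a few minutes all three pods should be Running and should have discovered each other; a quick check, assuming the paths used by the official nacos-server image:
kubectl get pods -n nacos -l app=nacos -o wide
# each member's cluster.conf should list all three peers
kubectl exec -it nacos-0 -n nacos -- cat /home/nacos/conf/cluster.conf
kubectl logs -f nacos-0 -n nacos   # watch the startup log of one member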
- NodePort: access from within the same LAN. The Service below only exposes the HTTP port 8848 (console and Open API); Nacos 2.x SDK clients additionally use the gRPC port (client port + 1000), so expose that port as well if applications connect from outside the cluster.
kind: Service
apiVersion: v1
metadata:
name: nacos-nodeport
namespace: nacos
labels:
app: nacos-nodeport
spec:
ports:
- name: http-8848
protocol: TCP
port: 8848
targetPort: 8848
nodePort: 32001
selector:
app: nacos
type: NodePort
sessionAffinity: None
externalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
internalTrafficPolicy: Cluster
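Assuming the Service above is saved as nacos-nodeport.yaml (the filename is arbitrary), apply it and open the console from any machine on the same LAN:
kubectl apply -f nacos-nodeport.yaml
kubectl get svc nacos-nodeport -n nacos
# Console: http://<any-node-ip>:32001/nacos (login is required because NACOS_AUTH_ENABLE is "true")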