One GlusterFS Storage Cluster Deployment
Note: The following are brief steps; refer to "009. Kubernetes Permanent Storage - GlusterFS Independent Deployment" for details.
1.1 Architecture
Omitted.
1.2 Related Planning
Host        | IP          | Disk | Remarks
k8smaster01 | 172.24.8.71 | -    | Kubernetes Master Node, Heketi Host
k8smaster02 | 172.24.8.72 | -    | Kubernetes Master Node, Heketi Host
k8smaster03 | 172.24.8.73 | -    | Kubernetes Master Node, Heketi Host
k8snode01   | 172.24.8.74 | sdb  | Kubernetes Worker Node, GlusterFS 01 Node
k8snode02   | 172.24.8.75 | sdb  | Kubernetes Worker Node, GlusterFS 02 Node
k8snode03   | 172.24.8.76 | sdb  | Kubernetes Worker Node, GlusterFS 03 Node
Tip: This plan uses the raw (bare) disks directly.
1.3 Install glusterfs
# yum -y install centos-release-gluster
# yum -y install glusterfs-server
# systemctl start glusterd
# systemctl enable glusterd
Tip: Installation on all GlusterFS nodes is recommended.
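A quick check such as the following confirms the package version and that glusterd is running on each node:
# glusterfs --version
# systemctl status glusterd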
1.4 Add Trust Pool
[root@k8snode01 ~]# gluster peer probe k8snode02
[root@k8snode01 ~]# gluster peer probe k8snode03
[root@k8snode01 ~]# gluster peer status #View trust pool status
[root@k8snode01 ~]# gluster pool list #View list of trust pools
Tip: These commands only need to be executed once, from any one GlusterFS node.
1.5 Install heketi
[root@k8smaster01 ~]# yum -y install heketi heketi-client
1.6 Configure heketi
[root@k8smaster01 ~]# vi /etc/heketi/heketi.json
1 { 2 "_port_comment": "Heketi Server Port Number", 3 "port": "8080", 4 5 "_use_auth": "Enable JWT authorization. Please enable for deployment", 6 "use_auth": true, 7 8 "_jwt": "Private keys for access", 9 "jwt": { 10 "_admin": "Admin has access to all APIs", 11 "admin": { 12 "key": "admin123" 13 }, 14 "_user": "User only has access to /volumes endpoint", 15 "user": { 16 "key": "xianghy" 17 } 18 }, 19 20 "_glusterfs_comment": "GlusterFS Configuration", 21 "glusterfs": { 22 "_executor_comment": [ 23 "Execute plugin. Possible choices: mock, ssh", 24 "mock: This setting is used for testing and development.", 25 " It will not send commands to any node.", 26 "ssh: This setting will notify Heketi to ssh to the nodes.", 27 " It will need the values in sshexec to be configured.", 28 "kubernetes: Communicate with GlusterFS containers over", 29 " Kubernetes exec api." 30 ], 31 "executor": "ssh", 32 33 "_sshexec_comment": "SSH username and private key file information", 34 "sshexec": { 35 "keyfile": "/etc/heketi/heketi_key", 36 "user": "root", 37 "port": "22", 38 "fstab": "/etc/fstab" 39 }, 40 41 "_db_comment": "Database file name", 42 "db": "/var/lib/heketi/heketi.db", 43 44 "_loglevel_comment": [ 45 "Set log level. Choices are:", 46 " none, critical, error, warning, info, debug", 47 "Default is warning" 48 ], 49 "loglevel" : "warning" 50 } 51 }
1.7 Configure Passwordless SSH Keys
[root@k8smaster01 ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
[root@k8smaster01 ~]# chown heketi:heketi /etc/heketi/heketi_key
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode01
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode02
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode03
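Passwordless login from the Heketi host can be verified before starting the service; the command below should print the remote hostname without prompting for a password:
[root@k8smaster01 ~]# ssh -i /etc/heketi/heketi_key root@k8snode01 hostname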
1.8 Start heketi
[root@k8smaster01 ~]# systemctl enable heketi.service
[root@k8smaster01 ~]# systemctl start heketi.service
[root@k8smaster01 ~]# systemctl status heketi.service
[root@k8smaster01 ~]# curl http://localhost:8080/hello #Test access
1.9 Configure Heketi Topology
[root@k8smaster01 ~]# vi /etc/heketi/topology.json
1 { 2 "clusters": [ 3 { 4 "nodes": [ 5 { 6 "node": { 7 "hostnames": { 8 "manage": [ 9 "k8snode01" 10 ], 11 "storage": [ 12 "172.24.8.74" 13 ] 14 }, 15 "zone": 1 16 }, 17 "devices": [ 18 "/dev/sdb" 19 ] 20 }, 21 { 22 "node": { 23 "hostnames": { 24 "manage": [ 25 "k8snode02" 26 ], 27 "storage": [ 28 "172.24.8.75" 29 ] 30 }, 31 "zone": 1 32 }, 33 "devices": [ 34 "/dev/sdb" 35 ] 36 }, 37 { 38 "node": { 39 "hostnames": { 40 "manage": [ 41 "k8snode03" 42 ], 43 "storage": [ 44 "172.24.8.76" 45 ] 46 }, 47 "zone": 1 48 }, 49 "devices": [ 50 "/dev/sdb" 51 ] 52 } 53 ] 54 } 55 ] 56 }
[root@k8smaster01 ~]# echo "export HEKETI_CLI_SERVER=http://k8smaster01:8080" >> /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# echo "alias heketi-cli='heketi-cli --user admin --secret admin123'" >> .bashrc
[root@k8smaster01 ~]# source /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# source .bashrc
[root@k8smaster01 ~]# echo $HEKETI_CLI_SERVER
http://k8smaster01:8080
[root@k8smaster01 ~]# heketi-cli --server $HEKETI_CLI_SERVER --user admin --secret admin123 topology load --json=/etc/heketi/topology.json
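Once the topology is loaded, the resulting clusters, nodes, and devices can be reviewed with:
[root@k8smaster01 ~]# heketi-cli topology info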
1.10 Cluster Management and Testing
[root@heketi ~]# heketi-cli cluster list #cluster list
[root@heketi ~]# heketi-cli node list #node information
[root@heketi ~]# heketi-cli volume list #volume information
[root@k8snode01 ~]# gluster volume info # Viewed through the glusterfs node
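As a quick end-to-end test, a small replicated volume can be created and then deleted through Heketi (a sketch; <VOLUME_ID> is a placeholder for the ID returned by the create command):
[root@k8smaster01 ~]# heketi-cli volume create --size=1 --replica=3
[root@k8smaster01 ~]# heketi-cli volume delete <VOLUME_ID>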
1.11 Create StorageClass
[root@k8smaster01 study]# vi heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: heketi
data:
  key: YWRtaW4xMjM=
type: kubernetes.io/glusterfs
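The key field is the base64 encoding of the Heketi admin key set in heketi.json (admin123); it can be generated with:
[root@k8smaster01 study]# echo -n "admin123" | base64
YWRtaW4xMjM=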
[root@k8smaster01 study]# kubectl create ns heketi
[root@k8smaster01 study]# kubectl create -f heketi-secret.yaml #Create the heketi Secret
[root@k8smaster01 study]# kubectl get secrets -n heketi
[root@k8smaster01 study]# vim gluster-heketi-storageclass.yaml #Formally create StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ghstorageclass
parameters:
  resturl: "http://172.24.8.71:8080"
  clusterid: "ad0f81f75f01d01ebd6a21834a2caa30"
  restauthenabled: "true"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "heketi"
  volumetype: "replicate:3"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
[root@k8smaster01 study]# kubectl create -f gluster-heketi-storageclass.yaml
Note: StorageClass resources cannot be modified after creation; to change one, delete it and create it again.
[root@k8smaster01 heketi]# kubectl get storageclasses #View confirmation
NAME PROVISIONER AGE
ghstorageclass kubernetes.io/glusterfs 85s
[root@k8smaster01 heketi]# kubectl describe storageclasses ghstorageclass
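To exercise the StorageClass end to end, a throwaway PVC referencing ghstorageclass can be created and checked for a Bound status (a minimal sketch; the PVC name and size are arbitrary):
[root@k8smaster01 heketi]# kubectl create -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-gluster-pvc
  namespace: default
spec:
  storageClassName: ghstorageclass
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
[root@k8smaster01 heketi]# kubectl get pvc test-gluster-pvc
[root@k8smaster01 heketi]# kubectl delete pvc test-gluster-pvc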
Two Cluster Monitoring Metrics
Note: The following steps are brief and refer to "049. Cluster Management - Cluster Monitoring Metrics" for details.
2.1 Enable the Aggregation Layer
The Aggregation Layer feature must be enabled. It is turned on by default when the cluster is deployed with kubeadm, which can be verified as follows.
[root@k8smaster01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml
2.2 Get deployment files
[root@k8smaster01 ~]# git clone https://github.com/kubernetes-incubator/metrics-server.git
[root@k8smaster01 ~]# cd metrics-server/deploy/1.8+/
[root@k8smaster01 1.8+]# vi metrics-server-deployment.yaml
......
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6    #Change to a domestic mirror source
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP    #Add the arguments above
......
2.3 Deploy metrics-server
[root@k8smaster01 1.8+]# kubectl apply -f .
[root@k8smaster01 1.8+]# kubectl -n kube-system get pods -l k8s-app=metrics-server
[root@k8smaster01 1.8+]# kubectl -n kube-system logs -l k8s-app=metrics-server -f #View deployment logs
2.4 Verification
[root@k8smaster01 ~]# kubectl top nodes
[root@k8smaster01 ~]# kubectl top pods --all-namespaces
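The same data can also be pulled straight from the Metrics API, which confirms the aggregation layer is routing requests to metrics-server:
[root@k8smaster01 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
[root@k8smaster01 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/pods"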
Three Prometheus Deployment
Note: The following steps are brief and refer to the "050.Cluster Management-Prometheus+Grafana Monitoring Scheme" for details.
3.1 Get deployment files
[root@k8smaster01 ~]# git clone https://github.com/prometheus/prometheus
3.2 Create Namespace
[root@k8smaster01 ~]# cd prometheus/documentation/examples/
[root@k8smaster01 examples]# vi monitor-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
[root@k8smaster01 examples]# kubectl create -f monitor-namespace.yaml
3.3 Create RBAC
[root@k8smaster01 examples]# vi rbac-setup.yml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitoring        #Just modify the namespace
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: monitoring        #Just modify the namespace
[root@k8smaster01 examples]# kubectl create -f rbac-setup.yml
3.4 Create Prometheus ConfigMap
[root@k8smaster01 examples]# cat prometheus-kubernetes.yml | grep -v ^$ | grep -v "#" >> prometheus-config.yaml
[root@k8smaster01 examples]# vi prometheus-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-server-conf
  labels:
    name: prometheus-server-conf
  namespace: monitoring        #Modify Namespace
data:
  prometheus.yml: |-
    global:
      scrape_interval: 10s
      evaluation_interval: 10s

    scrape_configs:
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
        - role: endpoints
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
          action: keep
          regex: default;kubernetes;https

      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics

      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

      - job_name: 'kubernetes-service-endpoints'
        kubernetes_sd_configs:
        - role: endpoints
        relabel_configs:
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
          action: replace
          target_label: __scheme__
          regex: (https?)
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
          action: replace
          target_label: __address__
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_service_name]
          action: replace
          target_label: kubernetes_name

      - job_name: 'kubernetes-services'
        metrics_path: /probe
        params:
          module: [http_2xx]
        kubernetes_sd_configs:
        - role: service
        relabel_configs:
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
          action: keep
          regex: true
        - source_labels: [__address__]
          target_label: __param_target
        - target_label: __address__
          replacement: blackbox-exporter.example.com:9115
        - source_labels: [__param_target]
          target_label: instance
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_service_name]
          target_label: kubernetes_name

      - job_name: 'kubernetes-ingresses'
        kubernetes_sd_configs:
        - role: ingress
        relabel_configs:
        - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
          regex: (.+);(.+);(.+)
          replacement: ${1}://${2}${3}
          target_label: __param_target
        - target_label: __address__
          replacement: blackbox-exporter.example.com:9115
        - source_labels: [__param_target]
          target_label: instance
        - action: labelmap
          regex: __meta_kubernetes_ingress_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_ingress_name]
          target_label: kubernetes_name

      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
        - role: pod
        relabel_configs:
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
          action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_pod_name]
          action: replace
          target_label: kubernetes_pod_name
[root@k8smaster01 examples]# kubectl create -f prometheus-config.yaml
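With the kubernetes-service-endpoints job in the ConfigMap above, any Service annotated with prometheus.io/scrape: "true" is discovered automatically. For example, an existing Service can be annotated as follows (the service name my-app and port 8080 are placeholders):
[root@k8smaster01 examples]# kubectl annotate service my-app prometheus.io/scrape="true" prometheus.io/port="8080"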
3.5 Create Persistent PVC
[root@k8smaster01 examples]# vi prometheus-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-pvc
  namespace: monitoring
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 examples]# kubectl create -f prometheus-pvc.yaml
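The claim should be bound dynamically by the ghstorageclass provisioner, which can be confirmed with:
[root@k8smaster01 examples]# kubectl get pvc -n monitoring
[root@k8smaster01 examples]# kubectl get pv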
3.6 Prometheus Deployment
[root@k8smaster01 examples]# vi prometheus-deployment.yml
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    name: prometheus-deployment
  name: prometheus-server
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-server
  template:
    metadata:
      labels:
        app: prometheus-server
    spec:
      containers:
      - name: prometheus-server
        image: prom/prometheus:v2.14.0
        command:
        - "/bin/prometheus"
        args:
        - "--config.file=/etc/prometheus/prometheus.yml"
        - "--storage.tsdb.path=/prometheus/"
        - "--storage.tsdb.retention=72h"
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - name: prometheus-config-volume
          mountPath: /etc/prometheus/
        - name: prometheus-storage-volume
          mountPath: /prometheus/
      serviceAccountName: prometheus
      imagePullSecrets:
      - name: regsecret
      volumes:
      - name: prometheus-config-volume
        configMap:
          defaultMode: 420
          name: prometheus-server-conf
      - name: prometheus-storage-volume
        persistentVolumeClaim:
          claimName: prometheus-pvc
[root@k8smaster01 examples]# kubectl create -f prometheus-deployment.yml
3.7 Create Prometheus Service
[root@k8smaster01 examples]# vi prometheus-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: prometheus-service
  name: prometheus-service
  namespace: monitoring
spec:
  type: NodePort
  selector:
    app: prometheus-server
  ports:
  - port: 9090
    targetPort: 9090
    nodePort: 30001
[root@k8smaster01 examples]# kubectl create -f prometheus-service.yaml
[root@k8smaster01 examples]# kubectl get all -n monitoring
3.8 Verify Prometheus
Direct browser access: http://172.24.8.100:30001/
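The scrape targets can also be checked from the command line via the Prometheus HTTP API (assuming 172.24.8.100 is the same address used for browser access above):
[root@k8smaster01 ~]# curl -s http://172.24.8.100:30001/api/v1/targets | grep -o '"health":"[^"]*"' | sort | uniq -c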
Four Grafana Deployment
Note: The following steps are brief and refer to the "050.Cluster Management-Prometheus+Grafana Monitoring Scheme" for details.
4.1 Get deployment files
[root@k8smaster01 ~]# git clone https://github.com/liukuan73/kubernetes-addons
[root@k8smaster01 ~]# cd /root/kubernetes-addons/monitor/prometheus+grafana
4.2 Create Persistent PVC
[root@k8smaster01 prometheus+grafana]# vi grafana-data-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-data-pvc
  namespace: monitoring
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana-data-pvc.yaml
4.3 Grafana Deployment
[root@k8smaster01 prometheus+grafana]# vi grafana.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: monitoring
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:6.5.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /var/lib/grafana
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          value: /
        readinessProbe:
          httpGet:
            path: /login
            port: 3000
      volumes:
      - name: grafana-storage
        persistentVolumeClaim:
          claimName: grafana-data-pvc
      nodeSelector:
        node-role.kubernetes.io/master: "true"
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: "NoSchedule"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/tcp-probe: 'true'
    prometheus.io/tcp-probe-port: '80'
  name: monitoring-grafana
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 3000
    nodePort: 30002
  selector:
    k8s-app: grafana
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster01 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster02 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster03 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana.yaml
[root@k8smaster01 examples]# kubectl get all -n monitoring
4.4 Verify Grafana
Direct browser access: http://172.24.8.100:30002/
4.5 Grafana Configuration
- Add data source: omitted
- Create user: omitted
Tip: For all Grafana configuration options, refer to: https://grafana.com/docs/grafana/latest/installation/configuration/.
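Because anonymous access with the Admin role is enabled in the Deployment above, the Prometheus data source may also be addable through the Grafana HTTP API instead of the UI. A rough sketch (the data source name is arbitrary, and the in-cluster URL assumes the prometheus-service created earlier):
[root@k8smaster01 ~]# curl -X POST http://172.24.8.100:30002/api/datasources \
  -H "Content-Type: application/json" \
  -d '{"name":"prometheus","type":"prometheus","url":"http://prometheus-service.monitoring.svc.cluster.local:9090","access":"proxy","isDefault":true}'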
4.6 View Monitoring
Access the browser again: http://172.24.8.100:30002/
Five Log Management
Note: The following steps are brief and refer to "051. Cluster Management - Log Management" for details.
5.1 Get deployment files
[root@k8smaster01 ~]# git clone https://github.com/kubernetes/kubernetes.git #The fluentd-elasticsearch addon lives under cluster/addons/
[root@k8smaster01 ~]# cd kubernetes/cluster/addons/fluentd-elasticsearch/
5.2 Modify Image Sources
[root@k8smaster01 ~]# sed -i "s/quay.io/quay-mirror.qiniu.com/g" `grep quay.io -rl ./*.yaml`
[root@k8smaster01 fluentd-elasticsearch]# vi es-statefulset.yaml
......
      - image: quay-mirror.qiniu.com/fluentd_elasticsearch/elasticsearch:v7.3.2
        name: elasticsearch-logging
        imagePullPolicy: IfNotPresent        #Add image pull policy
......
[root@k8smaster01 fluentd-elasticsearch]# cat fluentd-es-ds.yaml
......
        image: quay-mirror.qiniu.com/fluentd_elasticsearch/fluentd:v2.7.0
        imagePullPolicy: IfNotPresent        #Add image pull policy
......
[root@k8smaster01 fluentd-elasticsearch]# cat kibana-deployment.yaml
......
        image: docker.elastic.co/kibana/kibana-oss:7.3.2    #Modified to match the Elasticsearch version
        imagePullPolicy: IfNotPresent                       #Add image pull policy
......
5.3 Create Persistent PVC
[root@k8smaster01 fluentd-elasticsearch]# vi elasticsearch-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-pvc
  namespace: kube-system
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f elasticsearch-pvc.yaml
5.4 Deployment of elasticsearch
[root@k8smaster01 fluentd-elasticsearch]# vi es-statefulset.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: elasticsearch-logging
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: elasticsearch-logging
  apiGroup: ""
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v7.3.2
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  serviceName: elasticsearch-logging
  replicas: 1
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v7.3.2
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v7.3.2
    spec:
      serviceAccountName: elasticsearch-logging
      containers:
      - image: quay-mirror.qiniu.com/fluentd_elasticsearch/elasticsearch:v7.3.2
        name: elasticsearch-logging
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
            memory: 3Gi
          requests:
            cpu: 100m
            memory: 3Gi
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /data
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: elasticsearch-logging        #Mount permanent storage PVC
        persistentVolumeClaim:
          claimName: elasticsearch-pvc
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f es-statefulset.yaml
5.5 Deployment of Elasticsearch SVC
[root@k8smaster01 fluentd-elasticsearch]# vi es-service.yaml #official default
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Elasticsearch"
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch-logging
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f es-service.yaml
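Elasticsearch health can be checked once the Service is up, for example by port-forwarding and querying the cluster health endpoint:
[root@k8smaster01 fluentd-elasticsearch]# kubectl -n kube-system port-forward svc/elasticsearch-logging 9200:9200 &
[root@k8smaster01 fluentd-elasticsearch]# curl http://localhost:9200/_cluster/health?pretty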
5.6 Deploy fluentd
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f fluentd-es-configmap.yaml #Create fluentd ConfigMap
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f fluentd-es-ds.yaml #Deploy the fluentd DaemonSet
5.7 Deployment of Kibana
[root@k8smaster01 fluentd-elasticsearch]# vi kibana-deployment.yaml #Make the following modifications
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-logging
  namespace: kube-system
  labels:
    k8s-app: kibana-logging
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana-logging
  template:
    metadata:
      labels:
        k8s-app: kibana-logging
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: kibana-logging
        image: docker.elastic.co/kibana/kibana-oss:7.3.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch-logging:9200
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f kibana-deployment.yaml
5.8 Deployment of Kibana SVC
[root@k8smaster01 fluentd-elasticsearch]# vi kibana-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kibana-logging
  namespace: kube-system
  labels:
    k8s-app: kibana-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
spec:
  type: NodePort
  ports:
  - port: 5601
    protocol: TCP
    nodePort: 30003
    targetPort: ui
  selector:
    k8s-app: kibana-logging
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f kibana-service.yaml
[root@k8smaster01 fluentd-elasticsearch]# kubectl get pods -n kube-system -o wide | grep -E 'NAME|elasticsearch|fluentd|kibana' #View related resources
5.9 Verification
Direct browser access: http://172.24.8.100:30003/