From 638fb5b3c4cb69c9ed37febf16602583c6e174e8 Mon Sep 17 00:00:00 2001
From: wxin <15253413025@163.com>
Date: Sun, 11 Aug 2024 21:13:30 +0800
Subject: [PATCH] Delete kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../基于kubernetes部署Prometheus和Grafana.md | 683 ------------------
1 file changed, 683 deletions(-)
delete mode 100644 kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
diff --git a/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md b/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
deleted file mode 100644
index a9d5396..0000000
--- a/kubernetes-MD/基于kubernetes部署Prometheus和Grafana.md
+++ /dev/null
@@ -1,683 +0,0 @@
-
-# Deploying Prometheus and Grafana on Kubernetes
-
-
-------
-
-## I: Environment Preparation
-
-#### 1. The Kubernetes cluster is healthy
-
-```shell
-[root@master ~]# kubectl get node
-NAME     STATUS   ROLES                  AGE   VERSION
-master   Ready    control-plane,master   36d   v1.23.1
-node-1   Ready    <none>                 36d   v1.23.1
-node-2   Ready    <none>                 36d   v1.23.1
-node-3   Ready    <none>                 36d   v1.23.1
-```
-
-#### 2. The Harbor registry is healthy
-
-![image-20220602010601512](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602010601512.png)
-
-## II: Deploying Prometheus
-
-#### 1. Deploy node-exporter
-
- node-exporter collects monitoring metrics from machines (physical servers, VMs, cloud hosts, and so on), including CPU, memory, disk, network, and file-count statistics.
-
-Create the monitoring namespace:
-
-```shell
-[root@master ~]# kubectl create ns monitor-sa
-```
-
-Create node-export.yaml:
-
-```shell
-[root@master ~]# vim node-export.yaml
-apiVersion: apps/v1
-kind: DaemonSet              # ensures every node in the cluster runs an identical pod
-metadata:
-  name: node-exporter
-  namespace: monitor-sa
-  labels:
-    name: node-exporter
-spec:
-  selector:
-    matchLabels:
-      name: node-exporter
-  template:
-    metadata:
-      labels:
-        name: node-exporter
-    spec:
-      hostPID: true
-      hostIPC: true
-      hostNetwork: true
-      containers:
-      - name: node-exporter
-        image: prom/node-exporter:v0.16.0
-        #image: 10.0.0.230/xingdian/node-exporter:v0.16.0
-        ports:
-        - containerPort: 9100
-        resources:
-          requests:
-            cpu: 0.15        # the container requests at least 0.15 CPU cores
-        securityContext:
-          privileged: true   # run in privileged mode
-        args:
-        - --path.procfs
-        - /host/proc
-        - --path.sysfs
-        - /host/sys
-        - --collector.filesystem.ignored-mount-points
-        - '^/(sys|proc|dev|host|etc)($|/)'
-        volumeMounts:
-        - name: dev
-          mountPath: /host/dev
-        - name: proc
-          mountPath: /host/proc
-        - name: sys
-          mountPath: /host/sys
-        - name: rootfs
-          mountPath: /rootfs
-      tolerations:
-      - key: "node-role.kubernetes.io/master"
-        operator: "Exists"
-        effect: "NoSchedule"
-      volumes:
-      - name: proc
-        hostPath:
-          path: /proc
-      - name: dev
-        hostPath:
-          path: /dev
-      - name: sys
-        hostPath:
-          path: /sys
-      - name: rootfs
-        hostPath:
-          path: /
-```
-
-Note:
-
- When hostNetwork, hostIPC, and hostPID are all true, every container in the pod uses the host's network namespace, can perform IPC with the host, and can see all processes running on the host. Because of hostNetwork: true, port 9100 is exposed directly on each host, so there is no need to create a Service; every node simply listens on port 9100.
-
-Create it:
-
-```shell
-[root@master ~]# kubectl apply -f node-export.yaml
-```
-
-Check that node-exporter deployed successfully:
-
-```shell
-[root@master ~]# kubectl get pods -n monitor-sa -o wide
-NAME                  READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
-node-exporter-2cbrg   1/1     Running   0          34m   10.0.0.220   master   <none>           <none>
-node-exporter-7rrbh   1/1     Running   0          34m   10.0.0.222   node-2   <none>           <none>
-node-exporter-96v29   1/1     Running   0          34m   10.0.0.221   node-1   <none>           <none>
-node-exporter-bf2j8   1/1     Running   0          34m   10.0.0.223   node-3   <none>           <none>
-```
-
-Note:
-
- node-exporter listens on port 9100 by default; all monitoring data collected on a host can be fetched from that port:
-
-```shell
-[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_cpu_seconds
- % Total % Received % Xferd Average Speed Time Time Time Current
- Dload Upload Total Spent Left Speed
- 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0# HELP node_cpu_seconds_total Seconds the cpus spent in each mode.
-# TYPE node_cpu_seconds_total counter
-node_cpu_seconds_total{cpu="0",mode="idle"} 8398.49
-node_cpu_seconds_total{cpu="0",mode="iowait"} 1.54
-node_cpu_seconds_total{cpu="0",mode="irq"} 0
-node_cpu_seconds_total{cpu="0",mode="nice"} 0
-node_cpu_seconds_total{cpu="0",mode="softirq"} 17.2
-node_cpu_seconds_total{cpu="0",mode="steal"} 0
-node_cpu_seconds_total{cpu="0",mode="system"} 70.61
-node_cpu_seconds_total{cpu="0",mode="user"} 187.04
-node_cpu_seconds_total{cpu="1",mode="idle"} 8403.82
-node_cpu_seconds_total{cpu="1",mode="iowait"} 4.95
-node_cpu_seconds_total{cpu="1",mode="irq"} 0
-node_cpu_seconds_total{cpu="1",mode="nice"} 0
-node_cpu_seconds_total{cpu="1",mode="softirq"} 16.75
-node_cpu_seconds_total{cpu="1",mode="steal"} 0
-node_cpu_seconds_total{cpu="1",mode="system"} 71.26
-node_cpu_seconds_total{cpu="1",mode="user"} 190.27
-100 74016 100 74016 0 0 5878k 0 --:--:-- --:--:-- --:--:-- 6023k
-
-[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_load
- % Total % Received % Xferd Average Speed Time Time Time Current
- Dload Upload Total Spent Left Speed
-  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0# HELP node_load1 1m load average.
-# TYPE node_load1 gauge
-node_load1 0.2
-# HELP node_load15 15m load average.
-# TYPE node_load15 gauge
-node_load15 0.22
-# HELP node_load5 5m load average.
-# TYPE node_load5 gauge
-node_load5 0.2
-100 74044 100 74044 0 0 8604k 0 --:--:-- --:--:-- --:--:-- 9038k
-```
-
-#### 2. Install Prometheus
-
-Create a ServiceAccount and grant it RBAC permissions:
-
-```shell
-# Create a ServiceAccount named monitor
-[root@master ~]# kubectl create serviceaccount monitor -n monitor-sa
-
-# Bind the monitor ServiceAccount to the cluster-admin ClusterRole via a ClusterRoleBinding
-[root@master ~]# kubectl create clusterrolebinding monitor-clusterrolebinding -n monitor-sa --clusterrole=cluster-admin --serviceaccount=monitor-sa:monitor
-```
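-
-As an optional sanity check (not part of the original steps), kubectl's built-in authorization check can confirm that the ServiceAccount now has the expected permissions; it should print "yes":
-
-```shell
-[root@master ~]# kubectl auth can-i list pods --all-namespaces --as=system:serviceaccount:monitor-sa:monitor
-yes
-```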
-
-Create the Prometheus data directory:
-
-```shell
-# Prometheus will be scheduled to node-1, so create the directory on that node
-[root@node-1 ~]# mkdir /data && chmod 777 /data
-```
-
-Create a ConfigMap to hold the Prometheus configuration:
-
-```shell
-[root@master ~]# vim prometheus-cfg.yaml
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  labels:
-    app: prometheus
-  name: prometheus-config
-  namespace: monitor-sa
-data:
-  prometheus.yml: |
-    global:
-      scrape_interval: 15s
-      scrape_timeout: 10s
-      evaluation_interval: 1m
-    scrape_configs:
-    - job_name: 'kubernetes-node'
-      kubernetes_sd_configs:
-      - role: node
-      relabel_configs:
-      - source_labels: [__address__]
-        regex: '(.*):10250'
-        replacement: '${1}:9100'
-        target_label: __address__
-        action: replace
-      - action: labelmap
-        regex: __meta_kubernetes_node_label_(.+)
-    - job_name: 'kubernetes-node-cadvisor'
-      kubernetes_sd_configs:
-      - role: node
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - action: labelmap
-        regex: __meta_kubernetes_node_label_(.+)
-      - target_label: __address__
-        replacement: kubernetes.default.svc:443
-      - source_labels: [__meta_kubernetes_node_name]
-        regex: (.+)
-        target_label: __metrics_path__
-        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
-    - job_name: 'kubernetes-apiserver'
-      kubernetes_sd_configs:
-      - role: endpoints
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-        action: keep
-        regex: default;kubernetes;https
-    - job_name: 'kubernetes-service-endpoints'
-      kubernetes_sd_configs:
-      - role: endpoints
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-        action: keep
-        regex: true
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-        action: replace
-        target_label: __scheme__
-        regex: (https?)
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-        action: replace
-        target_label: __metrics_path__
-        regex: (.+)
-      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-        action: replace
-        target_label: __address__
-        regex: ([^:]+)(?::\d+)?;(\d+)
-        replacement: $1:$2
-      - action: labelmap
-        regex: __meta_kubernetes_service_label_(.+)
-      - source_labels: [__meta_kubernetes_namespace]
-        action: replace
-        target_label: kubernetes_namespace
-      - source_labels: [__meta_kubernetes_service_name]
-        action: replace
-        target_label: kubernetes_name
-```
-
-Create it:
-
-```shell
-[root@master ~]# kubectl apply -f prometheus-cfg.yaml
-configmap/prometheus-config created
-```
-
-Configuration explained:
-
-```shell
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  labels:
-    app: prometheus
-  name: prometheus-config
-  namespace: monitor-sa
-data:
-  prometheus.yml: |
-    global:
-      scrape_interval: 15s     # how often targets are scraped
-      scrape_timeout: 10s      # scrape timeout, default 10s
-      evaluation_interval: 1m  # how often rules (alerts) are evaluated, default 1m
-    scrape_configs:            # scrape targets; each set of targets is named by job_name and can be static or discovered
-    - job_name: 'kubernetes-node'
-      kubernetes_sd_configs:   # use Kubernetes service discovery
-      - role: node             # the node role discovers every node in the cluster via the kubelet's default HTTP port
-      relabel_configs:         # relabeling rules
-      - source_labels: [__address__]   # original label to match: the target address
-        regex: '(.*):10250'            # match addresses on port 10250
-        replacement: '${1}:9100'       # keep the IP part of ip:10250
-        target_label: __address__      # the new address becomes the captured IP plus :9100
-        action: replace                # replace action
-      - action: labelmap
-        regex: __meta_kubernetes_node_label_(.+)  # labels matching this regex are kept; without it only the instance label would show
-    - job_name: 'kubernetes-node-cadvisor'  # scrape cAdvisor data from the kubelet's /metrics/cadvisor endpoint to get container resource usage
-      kubernetes_sd_configs:
-      - role: node
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - action: labelmap                        # keep the matched labels
-        regex: __meta_kubernetes_node_label_(.+)  # keep labels carrying the __meta_kubernetes_node_label prefix
-      - target_label: __address__               # the discovered address, e.g. __address__="192.168.40.180:10250"
-        replacement: kubernetes.default.svc:443 # replace it with kubernetes.default.svc:443
-      - source_labels: [__meta_kubernetes_node_name]
-        regex: (.+)                             # capture the value of __meta_kubernetes_node_name
-        target_label: __metrics_path__          # rewrite the metrics path
-        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
-        # the metrics path becomes /api/v1/nodes/<node name>/proxy/metrics/cadvisor
-        # ${1} is the value captured from __meta_kubernetes_node_name
-        # the resulting URL is https://kubernetes.default.svc:443/api/v1/nodes/<node name>/proxy/metrics/cadvisor
-    - job_name: 'kubernetes-apiserver'
-      kubernetes_sd_configs:
-      - role: endpoints        # use endpoint discovery to scrape the apiserver on port 6443
-      scheme: https
-      tls_config:
-        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-        # the endpoint's namespace, its service name, and its port name
-        action: keep           # only scrape targets that match the condition; drop everything else
-        regex: default;kubernetes;https  # keep endpoints in the default namespace whose service is named kubernetes and whose port name is https
-    - job_name: 'kubernetes-service-endpoints'
-      kubernetes_sd_configs:
-      - role: endpoints
-      relabel_configs:
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-        action: keep
-        regex: true
-        # only scrape endpoints whose Service carries the annotation "prometheus.io/scrape: true"; annotations are key/value
-        # pairs, so the source label is the key and regex matches the value: on a match the keep action retains the target,
-        # otherwise it is dropped
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-        action: replace
-        target_label: __scheme__
-        regex: (https?)
-        # set the scheme: if the prometheus.io/scheme annotation matches the regex, its value replaces __scheme__
-      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-        action: replace
-        target_label: __metrics_path__
-        regex: (.+)
-        # for applications that expose metrics somewhere other than /metrics, declare e.g. "prometheus.io/path = /mymetrics"
-        # on the Service; the declared path is assigned to __metrics_path__ so Prometheus scrapes the custom path. The
-        # annotation name must match what is referenced here: if the Service used prometheus.io/app-metrics-path: '/metrics',
-        # the source label would have to be __meta_kubernetes_service_annotation_prometheus_io_app_metrics_path instead.
-      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-        action: replace
-        target_label: __address__
-        regex: ([^:]+)(?::\d+)?;(\d+)
-        replacement: $1:$2
-        # expose a custom application port: concatenate the address with the "prometheus.io/port" annotation from the
-        # Service and assign it to __address__, so Prometheus scrapes that port together with __metrics_path__; if the
-        # path is not the default /metrics, use the path replacement above to point at the real endpoint
-      - action: labelmap       # keep the labels matched below
-        regex: __meta_kubernetes_service_label_(.+)
-      - source_labels: [__meta_kubernetes_namespace]
-        action: replace        # rename __meta_kubernetes_namespace to kubernetes_namespace
-        target_label: kubernetes_namespace
-      - source_labels: [__meta_kubernetes_service_name]
-        action: replace
-        target_label: kubernetes_name
-```
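-
-For the 'kubernetes-service-endpoints' job above to pick up a Service, that Service needs the prometheus.io/* annotations referenced in the relabel rules. A minimal, hypothetical example (the my-app Service is not part of this deployment):
-
-```shell
-apiVersion: v1
-kind: Service
-metadata:
-  name: my-app                       # hypothetical application Service
-  namespace: default
-  annotations:
-    prometheus.io/scrape: "true"     # required, otherwise the keep rule drops the target
-    prometheus.io/port: "8080"       # port rewritten into __address__
-    prometheus.io/path: "/metrics"   # optional, defaults to /metrics
-spec:
-  selector:
-    app: my-app
-  ports:
-  - port: 8080
-    targetPort: 8080
-```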
-
-Deploy Prometheus with a Deployment:
-
-```shell
-[root@master ~]# cat prometheus-deploy.yaml
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: prometheus-server
-  namespace: monitor-sa
-  labels:
-    app: prometheus
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: prometheus
-      component: server
-    #matchExpressions:
-    #- {key: app, operator: In, values: [prometheus]}
-    #- {key: component, operator: In, values: [server]}
-  template:
-    metadata:
-      labels:
-        app: prometheus
-        component: server
-      annotations:
-        prometheus.io/scrape: 'false'
-    spec:
-      nodeName: node-1                    # pin the pod to a specific node
-      serviceAccountName: monitor
-      containers:
-      - name: prometheus
-        image: prom/prometheus:v2.2.1
-        #image: 10.0.0.230/xingdian/prometheus:v2.2.1
-        imagePullPolicy: IfNotPresent
-        command:
-        - prometheus
-        - --config.file=/etc/prometheus/prometheus.yml
-        - --storage.tsdb.path=/prometheus   # data directory
-        - --storage.tsdb.retention=720h     # data retention period
-        - --web.enable-lifecycle            # enable hot reload
-        ports:
-        - containerPort: 9090
-          protocol: TCP
-        volumeMounts:
-        - mountPath: /etc/prometheus/prometheus.yml
-          name: prometheus-config
-          subPath: prometheus.yml
-        - mountPath: /prometheus/
-          name: prometheus-storage-volume
-      volumes:
-      - name: prometheus-config
-        configMap:
-          name: prometheus-config
-          items:
-          - key: prometheus.yml
-            path: prometheus.yml
-            mode: 0644
-      - name: prometheus-storage-volume
-        hostPath:
-          path: /data
-          type: Directory
-```
-
-Create it:
-
-```shell
-[root@master ~]# kubectl apply -f prometheus-deploy.yaml
-deployment.apps/prometheus-server created
-```
-
-Check:
-
-```shell
-[root@master ~]# kubectl get pods -o wide -n monitor-sa
-NAME                                READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
-prometheus-server-59cb5d648-bxwrb   1/1     Running   0          14m   10.244.2.100   node-1   <none>           <none>
-```
-
-#### 3. Create a Service for the Prometheus pod
-
-```shell
-[root@master ~]# cat prometheus-svc.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: prometheus
-  namespace: monitor-sa
-  labels:
-    app: prometheus
-spec:
-  type: NodePort
-  ports:
-  - port: 9090
-    targetPort: 9090
-    protocol: TCP
-  selector:
-    app: prometheus
-    component: server
-```
-
-Create it:
-
-```shell
-[root@master ~]# kubectl apply -f prometheus-svc.yaml
-service/prometheus created
-```
-
-Check the NodePort the Service exposes on the hosts:
-
-```shell
-[root@master ~]# kubectl get svc -n monitor-sa
-NAME         TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
-prometheus   NodePort   10.106.61.80   <none>        9090:32169/TCP   32m
-```
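-
-With the NodePort shown above (32169 in this example), the Prometheus web UI can be opened in a browser against any node's IP, for instance:
-
-```shell
-# any cluster node IP works because the Service is of type NodePort
-http://10.0.0.220:32169
-```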
-
-#### 4. View the web UI
-
-![image-20220602011956600](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602011956600.png)
-
-![image-20220602012012382](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602012012382.png)
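-
-In the Graph view you can query the metrics scraped above. As a small example (assuming the node-exporter job from the ConfigMap), this expression approximates per-node CPU usage in percent:
-
-```shell
-# 100% minus the average share of time the CPUs spent idle over the last 5 minutes
-100 * (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])))
-```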
-
-#### 5. Prometheus hot reload
-
-```shell
-# To apply configuration changes without stopping Prometheus (hot reload), first find the pod:
-[root@master ~]# kubectl get pods -n monitor-sa -o wide -l app=prometheus
-NAME                                 READY   STATUS    RESTARTS   AGE     IP             NODE        NOMINATED NODE   READINESS GATES
-prometheus-server-689fb8cdbc-kcsw2   1/1     Running   0          5m39s   10.244.36.70   k8s-node1   <none>           <none>
-
-# Then trigger the reload against the pod IP:
-[root@master ~]# curl -X POST http://10.244.36.70:9090/-/reload
-
-# Check the logs
-[root@master ~]# kubectl logs -n monitor-sa prometheus-server-689fb8cdbc-kcsw2
-```
-
-Note:
-
-```shell
-# Hot reload can be slow. Alternatively, restart Prometheus the hard way: after editing prometheus-cfg.yaml above, force-delete:
-[root@master ~]# kubectl delete -f prometheus-cfg.yaml
-[root@master ~]# kubectl delete -f prometheus-deploy.yaml
-# Then apply again:
-[root@master ~]# kubectl apply -f prometheus-cfg.yaml
-[root@master ~]# kubectl apply -f prometheus-deploy.yaml
-# Note: prefer hot reload in production; force deletion may lose monitoring data
-```
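-
-If you prefer not to look up the pod IP by hand, the reload call can be scripted. This is only a sketch; it assumes the app=prometheus label used in the Deployment above:
-
-```shell
-# resolve the pod IP with jsonpath, then POST to the reload endpoint
-[root@master ~]# curl -X POST http://$(kubectl get pod -n monitor-sa -l app=prometheus -o jsonpath='{.items[0].status.podIP}'):9090/-/reload
-```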
-
-## III: Deploying Grafana
-
-#### 1. About Grafana
-
-Grafana is a cross-platform, open-source metrics analysis and visualization tool. It visualizes collected data and can notify alert receivers promptly.
-
-Its main features are:
-
-1) Visualization: fast and flexible client-side graphs; panel plugins offer many ways to visualize metrics and logs, and the official library ships a rich set of dashboards with heatmaps, line charts, tables, and more.
-
-2) Data sources: Graphite, InfluxDB, OpenTSDB, Prometheus, Elasticsearch, CloudWatch, KairosDB, and others.
-
-3) Alerting: visually define alert rules on your most important metrics; Grafana evaluates them continuously and sends notifications through Slack, PagerDuty, and similar channels when a threshold is reached.
-
-4) Mixed data sources: combine different data sources in the same chart; the data source can be chosen per query, including custom data sources.
-
-5) Annotations: annotate graphs with rich events from different data sources; hovering over an event shows its full metadata and tags.
-
-#### 2. Install Grafana
-
-```shell
-[root@master prome]# cat grafana.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: monitoring-grafana
-  namespace: kube-system
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      task: monitoring
-      k8s-app: grafana
-  template:
-    metadata:
-      labels:
-        task: monitoring
-        k8s-app: grafana
-    spec:
-      containers:
-      - name: grafana
-        image: 10.0.0.230/xingdian/heapster-grafana-amd64:v5.0.4
-        #heleicool/heapster-grafana-amd64:v5.0.4
-        ports:
-        - containerPort: 3000
-          protocol: TCP
-        volumeMounts:
-        - mountPath: /etc/ssl/certs
-          name: ca-certificates
-          readOnly: true
-        - mountPath: /var
-          name: grafana-storage
-        env:
-        - name: INFLUXDB_HOST
-          value: monitoring-influxdb
-        - name: GF_SERVER_HTTP_PORT
-          value: "3000"
-          # The following env variables are required to make Grafana accessible via
-          # the kubernetes api-server proxy. On production clusters, we recommend
-          # removing these env variables, setup auth for grafana, and expose the grafana
-          # service using a LoadBalancer or a public IP.
-        - name: GF_AUTH_BASIC_ENABLED
-          value: "false"
-        - name: GF_AUTH_ANONYMOUS_ENABLED
-          value: "true"
-        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
-          value: Admin
-        - name: GF_SERVER_ROOT_URL
-          # If you're only using the API Server proxy, set this value instead:
-          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
-          value: /
-      volumes:
-      - name: ca-certificates
-        hostPath:
-          path: /etc/ssl/certs
-      - name: grafana-storage
-        emptyDir: {}
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
-    # If you are NOT using this as an addon, you should comment out this line.
-    kubernetes.io/cluster-service: 'true'
-    kubernetes.io/name: monitoring-grafana
-  name: monitoring-grafana
-  namespace: kube-system
-spec:
-  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
-  # or through a public IP.
-  # type: LoadBalancer
-  # You could also use NodePort to expose the service at a randomly-generated port
-  # type: NodePort
-  ports:
-  - port: 80
-    targetPort: 3000
-  selector:
-    k8s-app: grafana
-  type: NodePort
-```
-
-Create it:
-
-```shell
-[root@master prome]# kubectl apply -f grafana.yaml
-deployment.apps/monitoring-grafana created
-service/monitoring-grafana created
-```
-
-Check:
-
-```shell
-[root@master prome]# kubectl get pods -n kube-system -l task=monitoring -o wide
-NAME                                  READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
-monitoring-grafana-7c5c6c7486-rbt62   1/1     Running   0          9s    10.244.1.83   node-3   <none>           <none>
-```
-
-```shell
-[root@master prome]# kubectl get svc -n kube-system | grep grafana
-monitoring-grafana   NodePort   10.101.77.194   <none>        80:30919/TCP   76s
-```
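-
-With the NodePort shown above (30919 in this example), Grafana can be reached from a browser at any node's IP, for instance:
-
-```shell
-http://10.0.0.220:30919
-```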
-
-## IV: Configuring Grafana
-
-Open Grafana in a browser:
-
-![image-20220602013222284](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013222284.png)
-
-Add a data source:
-
-![image-20220602013322234](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013322234.png)
-
-Specify the Prometheus address:
-
-![image-20220602013441712](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013441712.png)
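-
-Since Grafana runs inside the cluster, the data source URL can point at the Prometheus Service created earlier (Service "prometheus" in namespace monitor-sa, port 9090), for example via its cluster DNS name; a node IP plus the NodePort (e.g. http://10.0.0.220:32169) would also work:
-
-```shell
-http://prometheus.monitor-sa.svc:9090
-```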
-
-Import a monitoring dashboard:
-
-![image-20220602013943317](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013943317.png)
-
-![image-20220602014027197](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014027197.png)
-
-Note:
-
-Official dashboards can be downloaded from: https://grafana.com/dashboards?dataSource=prometheus&search=kubernetes
-
-![image-20220602014152927](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014152927.png)
-
-![image-20220602014212551](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014212551.png)
-
-Result:
-
-![image-20220602014306247](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014306247.png)
-
-![image-20220602014321106](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014321106.png)
-
-![image-20220602014337431](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014337431.png)
\ No newline at end of file