Upload files to kubernetes-MD

wxin 2024-08-11 21:07:12 +08:00
parent 8e597235b4
commit a70a59b867
4 changed files with 2140 additions and 0 deletions

<h1><center>Deploying a Microservice Project on Kubernetes</center></h1>
Author: Xingdian (unauthorized reproduction will be prosecuted)
------
## 1: Environment Preparation
#### 1. Kubernetes Cluster Environment
Check the cluster environment:
```shell
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 11d v1.23.1
node-1 Ready <none> 11d v1.23.1
node-2 Ready <none> 11d v1.23.1
node-3 Ready <none> 11d v1.23.1
```
#### 2. Harbor Environment
Check the Harbor registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508222722564.png" alt="image-20220508222722564" style="zoom:50%;" />
## 2: Project Preparation
#### 1. Project Package
#### 2. Project Port Plan
|         Service         | Internal Port | External Port |
| :---------------------: | :-----------: | :-----------: |
| tensquare_eureka_server | 10086 | 30020 |
| tensquare_zuul | 10020 | 30021 |
| tensquare_admin_service | 9001 | 30024 |
| tensquare_gathering | 9002 | 30022 |
| mysql | 3306 | 30023 |
## 3: Project Deployment
#### 1. Deploying eureka
Modify application.yml:
```yml
spring:
  application:
    name: EUREKA-HA
---
# Standalone configuration
server:
  port: 10086
eureka:
  instance:
    hostname: localhost
  client:
    register-with-eureka: false
    fetch-registry: false
    service-url:
      defaultZone: http://${eureka.instance.hostname}:${server.port}/eureka/
      #defaultZone: http://<pod-hostname>.<service-name>:<port>/eureka/
```
Create the Dockerfile:
```shell
[root@nfs-harbor jdk]# ls
Dockerfile tensquare_eureka_server-1.0-SNAPSHOT.jar jdk-8u211-linux-x64.tar.gz
[root@nfs-harbor jdk]# cat Dockerfile
FROM xingdian
MAINTAINER "xingdian" <xingdian@gmail.com>
ADD jdk-8u211-linux-x64.tar.gz /usr/local/
RUN mv /usr/local/jdk1.8.0_211 /usr/local/java
ENV JAVA_HOME /usr/local/java/
ENV PATH $PATH:$JAVA_HOME/bin
COPY tensquare_eureka_server-1.0-SNAPSHOT.jar /usr/local
EXPOSE 10086
CMD java -jar /usr/local/tensquare_eureka_server-1.0-SNAPSHOT.jar
```
Build the image:
```shell
[root@nfs-harbor jdk]# docker build -t eureka:v2022.1 .
```
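Before pushing, it can be worth smoke-testing the image locally. A minimal sketch, assuming port 10086 is free on the build host (the container name `eureka-test` is illustrative):
```shell
docker run -d --name eureka-test -p 10086:10086 eureka:v2022.1
sleep 30    # give the Spring Boot app time to start
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:10086/    # expect 200
docker rm -f eureka-test    # clean up
```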
Push to the image registry:
```shell
[root@nfs-harbor jdk]# docker tag eureka:v2022.1 10.0.0.230/xingdian/eureka:v2022.1
[root@nfs-harbor jdk]# docker push 10.0.0.230/xingdian/eureka:v2022.1
```
Verify in the registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508224930884.png" alt="image-20220508224930884" style="zoom:50%;" />
#### 2. Deploying tensquare_zuul
Create the Dockerfile:
```shell
[root@nfs-harbor jdk]# cat Dockerfile
FROM xingdian
MAINTAINER "xingdian" <xingdian@gmail.com>
ADD jdk-8u211-linux-x64.tar.gz /usr/local/
RUN mv /usr/local/jdk1.8.0_211 /usr/local/java
ENV JAVA_HOME /usr/local/java/
ENV PATH $PATH:$JAVA_HOME/bin
COPY tensquare_zuul-1.0-SNAPSHOT.jar /usr/local
EXPOSE 10020
CMD java -jar /usr/local/tensquare_zuul-1.0-SNAPSHOT.jar
```
Build the image:
```shell
[root@nfs-harbor jdk]# docker build -t zuul:v2022.1 .
```
Push the image:
```shell
[root@nfs-harbor jdk]# docker tag zuul:v2022.1 10.0.0.230/xingdian/zuul:v2022.1
[root@nfs-harbor jdk]# docker push 10.0.0.230/xingdian/zuul:v2022.1
```
Verify in the registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508230055752.png" alt="image-20220508230055752" style="zoom:50%;" />
Note:
Before building, edit application.yml inside the source jar (for example with vim); the modified content is shown below (see the sketch after the file):
```yml
server:
  port: 10020 # port
# Basic service info
spring:
  application:
    name: tensquare-zuul # service ID
# Eureka configuration
eureka:
  client:
    service-url:
      #defaultZone: http://192.168.66.103:10086/eureka,http://192.168.66.104:10086/eureka # Eureka address
      # tensquare_eureka_server address and port (modified)
      defaultZone: http://10.0.0.220:30020/eureka
  instance:
    prefer-ip-address: true
# Ribbon timeouts
ribbon:
  ConnectTimeout: 1500 # connect timeout, default 500 ms
  ReadTimeout: 3000 # request timeout, default 1000 ms
# Hystrix circuit-breaker timeout
hystrix:
  command:
    default:
      execution:
        isolation:
          thread:
            timeoutInMilliseconds: 2000 # breaker timeout, default 1000 ms
# Gateway routes
zuul:
  routes:
    admin:
      path: /admin/**
      serviceId: tensquare-admin-service
    gathering:
      path: /gathering/**
      serviceId: tensquare-gathering
# JWT parameters
jwt:
  config:
    key: itcast
    ttl: 1800000
```
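One way to apply these edits without a full Maven rebuild is to update the file inside the archive with the JDK's `jar` tool. A hedged sketch, assuming a standard Spring Boot layout where the config sits at `BOOT-INF/classes/application.yml`:
```shell
# Extract only the config file from the jar
jar xf tensquare_zuul-1.0-SNAPSHOT.jar BOOT-INF/classes/application.yml
# Edit it (apply the changes shown above)
vim BOOT-INF/classes/application.yml
# Write the modified file back into the jar at the same path
jar uf tensquare_zuul-1.0-SNAPSHOT.jar BOOT-INF/classes/application.yml
```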
#### 3. Deploying mysql
Pull the image (the official image is used):
```shell
[root@nfs-harbor mysql]# docker pull mysql:5.7.38
```
Push the image:
```shell
[root@nfs-harbor mysql]# docker tag mysql:5.7.38 10.0.0.230/xingdian/mysql:v1
[root@nfs-harbor mysql]# docker push 10.0.0.230/xingdian/mysql:v1
```
#### 4. Deploying admin_service
Create the Dockerfile:
```shell
[root@nfs-harbor jdk]# cat Dockerfile
FROM xingdian
MAINTAINER "xingdian" <xingdian@gmail.com>
ADD jdk-8u211-linux-x64.tar.gz /usr/local/
RUN mv /usr/local/jdk1.8.0_211 /usr/local/java
ENV JAVA_HOME /usr/local/java/
ENV PATH $PATH:$JAVA_HOME/bin
COPY tensquare_admin_service-1.0-SNAPSHOT.jar /usr/local
EXPOSE 9001
CMD java -jar /usr/local/tensquare_admin_service-1.0-SNAPSHOT.jar
```
Build the image:
```shell
[root@nfs-harbor jdk]# docker build -t admin_service:v2022.1 .
```
Push the image:
```shell
[root@nfs-harbor jdk]# docker tag admin_service:v2022.1 10.0.0.230/xingdian/admin_service:v2022.1
[root@nfs-harbor jdk]# docker push 10.0.0.230/xingdian/admin_service:v2022.1
```
Note:
Before building, edit application.yml inside the source jar in the same way; the modified content:
```yml
spring:
  application:
    name: tensquare-admin-service # service name
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    # database address (modified)
    url: jdbc:mysql://10.0.0.220:30023/tensquare_user?characterEncoding=UTF8&useSSL=false
    # database user (modified)
    username: root
    # database password (modified)
    password: mysql
  jpa:
    database: mysql
    show-sql: true
# Eureka configuration
eureka:
  client:
    service-url:
      #defaultZone: http://192.168.66.103:10086/eureka,http://192.168.66.104:10086/eureka
      # tensquare_eureka_server address and port (modified)
      defaultZone: http://10.0.0.220:30020/eureka
  instance:
    lease-renewal-interval-in-seconds: 5 # send a heartbeat every 5 s
    lease-expiration-duration-in-seconds: 10 # expire after 10 s without one
    prefer-ip-address: true
# JWT parameters
jwt:
  config:
    key: itcast
    ttl: 1800000
```
#### 5. Deploying gathering
Create the Dockerfile:
```shell
[root@nfs-harbor jdk]# cat Dockerfile
FROM xingdian
MAINTAINER "xingdian" <xingdian@gmail.com>
ADD jdk-8u211-linux-x64.tar.gz /usr/local/
RUN mv /usr/local/jdk1.8.0_211 /usr/local/java
ENV JAVA_HOME /usr/local/java/
ENV PATH $PATH:$JAVA_HOME/bin
COPY tensquare_gathering-1.0-SNAPSHOT.jar /usr/local
CMD java -jar /usr/local/tensquare_gathering-1.0-SNAPSHOT.jar
```
Build the image:
```shell
[root@nfs-harbor jdk]# docker build -t gathering:v2022.1 .
```
Push the image:
```shell
[root@nfs-harbor jdk]# docker tag gathering:v2022.1 10.0.0.230/xingdian/gathering:v2022.1
[root@nfs-harbor jdk]# docker push 10.0.0.230/xingdian/gathering:v2022.1
```
Verify in the registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508233621370.png" alt="image-20220508233621370" style="zoom:50%;" />
Note: the application.yml inside the jar is modified in the same way:
```yml
server:
  port: 9002
spring:
  application:
    name: tensquare-gathering # service name
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    # database address (modified)
    url: jdbc:mysql://10.0.0.220:30023/tensquare_gathering?characterEncoding=UTF8&useSSL=false
    # database user (modified)
    username: root
    # database password (modified)
    password: mysql
  jpa:
    database: mysql
    show-sql: true
# Eureka client configuration
eureka:
  client:
    service-url:
      #defaultZone: http://192.168.66.103:10086/eureka,http://192.168.66.104:10086/eureka
      # tensquare_eureka_server address and port (modified)
      defaultZone: http://10.0.0.220:30020/eureka
  instance:
    lease-renewal-interval-in-seconds: 5 # send a heartbeat every 5 s
    lease-expiration-duration-in-seconds: 10 # expire after 10 s without one
    prefer-ip-address: true
```
## 4: Deploying to the Kubernetes Cluster
#### 1. Verify All Images
![image-20220508233955412](https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508233955412.png)
#### 2. Deploying eureka
Create the Eureka Deployment:
```shell
[root@master xingdian]# cat Eureka.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: eureka-deployment
  labels:
    app: eureka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: eureka
  template:
    metadata:
      labels:
        app: eureka
    spec:
      containers:
      - name: eureka
        image: 10.0.0.230/xingdian/eureka:v2022.1
        ports:
        - containerPort: 10086
---
apiVersion: v1
kind: Service
metadata:
  name: eureka-service
  labels:
    app: eureka
spec:
  type: NodePort
  ports:
  - port: 10086
    name: eureka
    targetPort: 10086
    nodePort: 30020
  selector:
    app: eureka
```
Create:
```shell
[root@master xingdian]# kubectl create -f Eureka.yaml
deployment.apps/eureka-deployment created
service/eureka-service created
```
Verify:
```shell
[root@master xingdian]# kubectl get pod
NAME READY STATUS RESTARTS AGE
eureka-deployment-69c575d95-hx8s6 1/1 Running 0 2m20s
[root@master xingdian]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
eureka-service NodePort 10.107.243.240 <none> 10086:30020/TCP 2m22s
```
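Beyond kubectl, the Eureka dashboard should now answer on the NodePort from outside the cluster; a quick sketch (10.0.0.220 is the master address used throughout this document):
```shell
# Expect HTTP 200 once the pod is ready
curl -s -o /dev/null -w "%{http_code}\n" http://10.0.0.220:30020/
```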
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508235409218.png" alt="image-20220508235409218" style="zoom:50%;" />
#### 3. Deploying zuul
Create the zuul Deployment:
```shell
[root@master xingdian]# cat Zuul.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zuul-deployment
  labels:
    app: zuul
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zuul
  template:
    metadata:
      labels:
        app: zuul
    spec:
      containers:
      - name: zuul
        image: 10.0.0.230/xingdian/zuul:v2022.1
        ports:
        - containerPort: 10020
---
apiVersion: v1
kind: Service
metadata:
  name: zuul-service
  labels:
    app: zuul
spec:
  type: NodePort
  ports:
  - port: 10020
    name: zuul
    targetPort: 10020
    nodePort: 30021
  selector:
    app: zuul
```
Create:
```shell
[root@master xingdian]# kubectl create -f Zuul.yaml
```
Verify:
```shell
[root@master xingdian]# kubectl get pod
NAME READY STATUS RESTARTS AGE
eureka-deployment-69c575d95-hx8s6 1/1 Running 0 7m42s
zuul-deployment-6d76647cf9-6rmdj 1/1 Running 0 10s
[root@master xingdian]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
eureka-service NodePort 10.107.243.240 <none> 10086:30020/TCP 7m37s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 11d
zuul-service NodePort 10.103.35.255 <none> 10020:30021/TCP 5s
```
Verify that zuul has registered with Eureka:
![image-20220508235634459](https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220508235634459.png)
#### 4. Deploying mysql
Create the mysql rc and svc:
```shell
[root@master mysql]# cat mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql-svc
  labels:
    name: mysql-svc
spec:
  type: NodePort
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306
    name: http
    nodePort: 30023
  selector:
    name: mysql-pod
[root@master mysql]# cat mysql-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql-rc
  labels:
    name: mysql-rc
spec:
  replicas: 1
  selector:
    name: mysql-pod
  template:
    metadata:
      labels:
        name: mysql-pod
    spec:
      containers:
      - name: mysql
        image: 10.0.0.230/xingdian/mysql:v1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "mysql"
```
Create:
```shell
[root@master mysql]# kubectl create -f mysql-rc.yaml
replicationcontroller/mysql-rc created
[root@master mysql]# kubectl create -f mysql-svc.yaml
service/mysql-svc created
```
Verify:
```shell
[root@master mysql]# kubectl get pod
NAME READY STATUS RESTARTS AGE
eureka-deployment-69c575d95-hx8s6 1/1 Running 0 29m
mysql-rc-sbdcl 1/1 Running 0 8m41s
zuul-deployment-6d76647cf9-gpsms 1/1 Running 0 21m
[root@master mysql]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
eureka-service NodePort 10.107.243.240 <none> 10086:30020/TCP 29m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 11d
mysql-svc NodePort 10.98.4.62 <none> 3306:30023/TCP 9m1s
zuul-service NodePort 10.103.35.255 <none> 10020:30021/TCP 22m
```
Create the databases:
```shell
[root@nfs-harbor ~]# mysql -u root -pmysql -h 10.0.0.220 -P 30023
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MySQL connection id is 2
Server version: 5.7.38 MySQL Community Server (GPL)
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MySQL [(none)]> create database tensquare_user charset=utf8;
Query OK, 1 row affected (0.00 sec)
MySQL [(none)]> create database tensquare_gathering charset=utf8;
Query OK, 1 row affected (0.01 sec)
MySQL [(none)]> exit
Bye
```
Import the data:
```shell
[root@nfs-harbor ~]# mysql -u root -pmysql -h 10.0.0.220 -P 30023
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.7.38 MySQL Community Server (GPL)
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MySQL [(none)]> source /var/ftp/share/tensquare_user.sql
MySQL [tensquare_user]> source /var/ftp/share/tensquare_gathering.sql
MySQL [tensquare_gathering]> exit
Bye
```
Verify:
```shell
[root@nfs-harbor ~]# mysql -u root -pmysql -h 10.0.0.220 -P 30023
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.7.38 MySQL Community Server (GPL)
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MySQL [(none)]> show databases;
+---------------------+
| Database |
+---------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
| tensquare_gathering |
| tensquare_user |
+---------------------+
6 rows in set (0.00 sec)
MySQL [(none)]> use tensquare_gathering
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MySQL [tensquare_gathering]> show tables;
+-------------------------------+
| Tables_in_tensquare_gathering |
+-------------------------------+
| tb_city |
| tb_gathering |
+-------------------------------+
2 rows in set (0.00 sec)
MySQL [tensquare_gathering]> use tensquare_user
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MySQL [tensquare_user]> show tables;
+--------------------------+
| Tables_in_tensquare_user |
+--------------------------+
| tb_admin |
+--------------------------+
1 row in set (0.01 sec)
```
#### 5. Deploying admin_service
Create the admin_service Deployment:
```shell
[root@master xingdian]# cat Admin-service.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: admin-deployment
  labels:
    app: admin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: admin
  template:
    metadata:
      labels:
        app: admin
    spec:
      containers:
      - name: admin
        image: 10.0.0.230/xingdian/admin_service:v2022.1
        ports:
        - containerPort: 9001
---
apiVersion: v1
kind: Service
metadata:
  name: admin-service
  labels:
    app: admin
spec:
  type: NodePort
  ports:
  - port: 9001
    name: admin
    targetPort: 9001
    nodePort: 30024
  selector:
    app: admin
```
Create:
```shell
[root@master xingdian]# kubectl create -f Admin-service.yaml
deployment.apps/admin-deployment created
service/admin-service created
```
Verify:
```shell
[root@master xingdian]# kubectl get pod
NAME READY STATUS RESTARTS AGE
admin-deployment-54c5664d69-l2lbc 1/1 Running 0 23s
eureka-deployment-69c575d95-mrj66 1/1 Running 0 47m
mysql-rc-zgxk4 1/1 Running 0 7m23s
zuul-deployment-6d76647cf9-gpsms 1/1 Running 0 39m
[root@master xingdian]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
admin-service NodePort 10.101.251.47 <none> 9001:30024/TCP 6s
eureka-service NodePort 10.107.243.240 <none> 10086:30020/TCP 47m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 11d
mysql-svc NodePort 10.98.4.62 <none> 3306:30023/TCP 26m
zuul-service NodePort 10.103.35.255 <none> 10020:30021/TCP 39m
```
Verify in the Eureka registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220509013257937.png" alt="image-20220509013257937" style="zoom:50%;" />
#### 6. Deploying gathering
Create the gathering Deployment:
```shell
[root@master xingdian]# cat Gathering.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gathering-deployment
  labels:
    app: gathering
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gathering
  template:
    metadata:
      labels:
        app: gathering
    spec:
      containers:
      - name: gathering
        image: 10.0.0.230/xingdian/gathering:v2022.1
        ports:
        - containerPort: 9002
---
apiVersion: v1
kind: Service
metadata:
  name: gathering-service
  labels:
    app: gathering
spec:
  type: NodePort
  ports:
  - port: 9002
    name: gathering
    targetPort: 9002
    nodePort: 30022
  selector:
    app: gathering
```
Create:
```shell
[root@master xingdian]# kubectl create -f Gathering.yaml
deployment.apps/gathering-deployment created
service/gathering-service created
```
Verify:
```shell
[root@master xingdian]# kubectl get pod
NAME READY STATUS RESTARTS AGE
admin-deployment-54c5664d69-2tqlw 1/1 Running 0 33s
eureka-deployment-69c575d95-xzx9t 1/1 Running 0 13m
gathering-deployment-6fcdd5d5-wbsxt 1/1 Running 0 27s
mysql-rc-zgxk4 1/1 Running 0 28m
zuul-deployment-6d76647cf9-jkm7f 1/1 Running 0 12m
```
Verify in the Eureka registry:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220509005823566.png" alt="image-20220509005823566" style="zoom:50%;" />
#### 7. Testing the API in a Browser
![image-20220509015035488](https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220509015035488.png)

<h1><center>Deploying a Website Project on Kubernetes</center></h1>
Author: Xingdian (unauthorized reproduction will be prosecuted)
------
## 1: Environment Preparation
#### 1. Kubernetes Cluster
The cluster should be running normally; check it, for example, with:
```shell
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 5d19h v1.23.1
node-1 Ready <none> 5d19h v1.23.1
node-2 Ready <none> 5d19h v1.23.1
node-3 Ready <none> 5d19h v1.23.1
```
#### 2. Harbor Private Registry
Provides image hosting for the Kubernetes cluster:
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220502184026483.png" alt="image-20220502184026483" style="zoom:50%;" />
## 2: Project Deployment
#### 1. Building the Image
Software download:
```shell
wget https://nginx.org/download/nginx-1.20.2.tar.gz
```
Project download:
```shell
git clone https://github.com/blackmed/xingdian-project.git
```
Dockerfile for the CentOS base image:
```shell
[root@nfs-harbor ~]# cat Dockerfile
FROM daocloud.io/centos:7
MAINTAINER "xingdianvip@gmail.com"
ENV container docker
RUN yum -y swap -- remove fakesystemd -- install systemd systemd-libs
RUN yum -y update; yum clean all; \
(cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
rm -f /lib/systemd/system/multi-user.target.wants/*;\
rm -f /etc/systemd/system/*.wants/*;\
rm -f /lib/systemd/system/local-fs.target.wants/*; \
rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
rm -f /lib/systemd/system/basic.target.wants/*;\
rm -f /lib/systemd/system/anaconda.target.wants/*;
VOLUME [ "/sys/fs/cgroup" ]
CMD ["/usr/sbin/init"]
[root@nfs-harbor ~]# docker build -t xingdian .
```
Build the project image:
```shell
[root@nfs-harbor nginx]# cat Dockerfile
FROM xingdian
ADD nginx-1.20.2.tar.gz /usr/local
RUN rm -rf /etc/yum.repos.d/*
COPY CentOS-Base.repo /etc/yum.repos.d/
COPY epel.repo /etc/yum.repos.d/
RUN yum clean all && yum makecache fast
RUN yum -y install gcc gcc-c++ openssl openssl-devel pcre-devel zlib-devel make
WORKDIR /usr/local/nginx-1.20.2
RUN ./configure --prefix=/usr/local/nginx
RUN make && make install
WORKDIR /usr/local/nginx
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/nginx/sbin
EXPOSE 80
RUN rm -rf /usr/local/nginx/conf/nginx.conf
COPY nginx.conf /usr/local/nginx/conf/
RUN mkdir /dist
CMD ["nginx", "-g", "daemon off;"]
[root@nfs-harbor nginx]# docker build -t nginx:v2 .
```
Note:
Prepare the CentOS Base and EPEL repo files in advance. Optionally, test-run the image before pushing (see the sketch below).
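A minimal sketch of such a test run, assuming host port 8080 is free (the container name `nginx-test` is illustrative):
```shell
docker run -d --name nginx-test -p 8080:80 nginx:v2
curl -I http://localhost:8080/    # expect an HTTP response header from nginx
docker rm -f nginx-test
```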
#### 2. Pushing the Image to Harbor
Retag the image:
```shell
[root@nfs-harbor ~]# docker tag nginx:v2 10.0.0.230/xingdian/nginx:v2
```
Log in to the private registry:
```shell
[root@nfs-harbor ~]# docker login 10.0.0.230
Username: xingdian
Password:
```
Push the image:
```shell
[root@nfs-harbor ~]# docker push 10.0.0.230/xingdian/nginx:v2
```
Note:
docker push uses HTTPS by default; since this Harbor deployment serves HTTP, configure the insecure registry as shown in the next section before pushing.
#### 3. Connecting the Kubernetes Cluster to Harbor
Configure Docker on every cluster node to allow the HTTP registry (HTTPS is the default):
```shell
[root@master ~]# vim /etc/systemd/system/multi-user.target.wants/docker.service
ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry 10.0.0.230 --containerd=/run/containerd/containerd.sock
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
```
Create a secret in the cluster for authenticating to Harbor:
```shell
[root@master ~]# kubectl create secret docker-registry regcred --docker-server=10.0.0.230 --docker-username=diange --docker-password=QianFeng@123
[root@master ~]# kubectl get secret
NAME TYPE DATA AGE
regcred kubernetes.io/dockerconfigjson 1 19h
```
Note:
regcred: name of the secret
--docker-server: registry address
--docker-username: Harbor user
--docker-password: Harbor password
#### 4. Deploying NFS
NFS provides persistent storage for the Kubernetes cluster; also install nfs-utils on the cluster nodes so they can mount NFS filesystems:
```shell
[root@nfs-harbor ~]# yum -y install nfs-utils
[root@nfs-harbor ~]# systemctl start nfs
[root@nfs-harbor ~]# systemctl enable nfs
```
Create and export the shared directory:
```shell
[root@nfs-harbor ~]# mkdir /kubernetes-1
[root@nfs-harbor ~]# cat /etc/exports
/kubernetes-1 *(rw,no_root_squash,sync)
[root@nfs-harbor ~]# exportfs -rv
```
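The export can be verified from any machine with nfs-utils installed; a sketch (10.0.0.230 is the NFS server address used in the YAML below):
```shell
showmount -e 10.0.0.230    # the export list should include /kubernetes-1 *
```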
Put the project files into the shared directory:
```shell
[root@nfs-harbor ~]# git clone https://github.com/blackmed/xingdian-project.git
[root@nfs-harbor ~]# unzip dist.zip
[root@nfs-harbor ~]# cp -r dist/* /kubernetes-1
```
#### 5. Creating the StatefulSet to Deploy the Project
Besides the StatefulSet, the YAML below also defines a Service, two PersistentVolumes, and a StorageClass:
```shell
[root@master xingdian]# cat Statefulset.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    name: web
    targetPort: 80
    nodePort: 30010
  selector:
    app: nginx
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: xingdian
provisioner: example.com/external-nfs
parameters:
  server: 10.0.0.230
  path: /kubernetes-1
  readOnly: "false"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: xingdian-1
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  storageClassName: xingdian
  nfs:
    path: /kubernetes-1
    server: 10.0.0.230
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: xingdian-2
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  storageClassName: xingdian
  nfs:
    path: /kubernetes-1
    server: 10.0.0.230
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: 10.0.0.230/xingdian/nginx:v2
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /dist
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "xingdian"
      resources:
        requests:
          storage: 1Gi
```
#### 6. Running
```shell
[root@master xingdian]# kubectl create -f Statefulset.yaml
service/nginx created
storageclass.storage.k8s.io/xingdian created
persistentvolume/xingdian-1 created
persistentvolume/xingdian-2 created
statefulset.apps/web created
```
## 3: Project Verification
#### 1. PV Verification
```shell
[root@master xingdian]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
xingdian-1 1Gi RWO Retain Bound default/www-web-1 xingdian 9m59s
xingdian-2 1Gi RWO Retain Bound default/www-web-0 xingdian 9m59s
```
#### 2. PVC Verification
```shell
[root@master xingdian]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound xingdian-2 1Gi RWO xingdian 10m
www-web-1 Bound xingdian-1 1Gi RWO xingdian 10m
```
#### 3. StorageClass Verification
```shell
[root@master xingdian]# kubectl get storageclass
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
xingdian example.com/external-nfs Delete Immediate false 10m
```
#### 4. StatefulSet Verification
```shell
[root@master xingdian]# kubectl get statefulset
NAME READY AGE
web 2/2 13m
[root@master xingdian]# kubectl get pod
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 13m
web-1 1/1 Running 0 13m
```
#### 5. Service Verification
```shell
[root@master xingdian]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx NodePort 10.111.189.32 <none> 80:30010/TCP 13m
```
#### 6. Browser Access
<img src="https://xingdian-image.oss-cn-beijing.aliyuncs.com/xingdian-image/image-20220502193031689.png" alt="image-20220502193031689" style="zoom:80%;" />

<h1><center>Building an ES Cluster on Kubernetes</center></h1>
Author: Xingdian (unauthorized reproduction will be prosecuted)
------
## 1: Environment Preparation
#### 1. Kubernetes Cluster Environment
|       Node        |   Address   |
| :---------------: | :---------: |
| Kubernetes-Master | 10.9.12.206 |
| Kubernetes-Node-1 | 10.9.12.205 |
| Kubernetes-Node-2 | 10.9.12.204 |
| Kubernetes-Node-3 | 10.9.12.203 |
|    DNS Server     | 10.9.12.210 |
|   Proxy Server    | 10.9.12.209 |
|    NFS Storage    | 10.9.12.250 |
#### 2. Kuboard Cluster Management
![image-20240420164922730](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420164922730.png)
## 2: Building the ES Cluster
#### 1. Building Persistent Storage
1. Deploy the NFS server
2. Create the shared directory
A script is used for this step:
```shell
[root@xingdiancloud-1 ~]# cat nfs.sh
#!/bin/bash
read -p "Enter the shared directory to create: " dir
if [ -d $dir ];then
    echo "Directory exists, enter a different shared directory: "
    read again_dir
    mkdir $again_dir -p
    echo "Shared directory created"
    read -p "Enter the clients to share with: " ips
    echo "$again_dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
    xingdian=`cat /etc/exports |grep "$again_dir" |wc -l`
    if [ $xingdian -eq 1 ];then
        echo "Share configured successfully"
        exportfs -rv >/dev/null
        exit
    else
        exit
    fi
else
    mkdir $dir -p
    echo "Shared directory created"
    read -p "Enter the clients to share with: " ips
    echo "$dir ${ips}(rw,sync,no_root_squash)" >> /etc/exports
    xingdian=`cat /etc/exports |grep "$dir" |wc -l`
    if [ $xingdian -eq 1 ];then
        echo "Share configured successfully"
        exportfs -rv >/dev/null
        exit
    else
        exit
    fi
fi
```
3. Create the StorageClass
```yaml
[root@xingdiancloud-master ~]# vim namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: logging
[root@xingdiancloud-master ~]# vim storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    k8s.kuboard.cn/storageNamespace: logging
    k8s.kuboard.cn/storageType: nfs_client_provisioner
  name: data-es
parameters:
  archiveOnDelete: 'false'
provisioner: nfs-data-es
reclaimPolicy: Retain
volumeBindingMode: Immediate
```
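A sketch of applying and checking these objects; note that the `nfs-data-es` provisioner must be backed by a running nfs-client-provisioner (here managed through Kuboard):
```shell
kubectl apply -f namespace.yaml
kubectl apply -f storageclass.yaml
kubectl get sc data-es    # confirm the StorageClass exists
```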
4. Create the PersistentVolume
```yaml
[root@xingdiancloud-master ~]# vim persistenVolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    pv.kubernetes.io/bound-by-controller: 'yes'
  finalizers:
  - kubernetes.io/pv-protection
  name: nfs-pv-data-es
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 100Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: nfs-pvc-data-es
    namespace: kube-system
  nfs:
    path: /data/es-data
    server: 10.9.12.250
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-storageclass-provisioner
  volumeMode: Filesystem
```
Note: the StorageClass and PersistentVolume can also be created from the Kuboard UI.
#### 2. Labeling the Nodes
```shell
[root@xingdiancloud-master ~]# kubectl label nodes xingdiancloud-node-1 es=log
```
Note:
Every node that will run ES must be labeled.
The label works with the nodeSelector in the StatefulSet that follows.
#### 3. Deploying the ES Cluster
Note: every ES cluster member needs a unique, stable network identity plus persistent storage. A Deployment cannot provide this (it only suits stateless applications), so a StatefulSet is used:
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: logging
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      nodeSelector:
        es: log
      initContainers:
      - name: increase-vm-max-map
        image: busybox
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
        ports:
        - name: rest
          containerPort: 9200
        - name: inter
          containerPort: 9300
        resources:
          limits:
            cpu: 500m
            memory: 4000Mi
          requests:
            cpu: 500m
            memory: 3000Mi
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: cluster.initial_master_nodes
          value: "es-0,es-1,es-2"
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "elasticsearch"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        - name: network.host
          value: "0.0.0.0"
        - name: node.max_local_storage_nodes
          value: "3"
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: elasticsearch
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: data-es
      resources:
        requests:
          storage: 25Gi
```
#### 4. Creating a Service to Expose the ES Cluster
```yaml
[root@xingdiancloud-master ~]# vim elasticsearch-svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
  - port: 9200
    targetPort: 9200
    nodePort: 30010
    name: rest
  - port: 9300
    name: inter-node
```
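Once the Service is up, cluster health can be checked through the NodePort; a sketch against the master address from the table above:
```shell
# Expect "status" : "green" and "number_of_nodes" : 3
curl -s http://10.9.12.206:30010/_cluster/health?pretty
curl -s http://10.9.12.206:30010/_cat/nodes?v    # list the three ES nodes
```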
#### 5. Access Test
Note:
Access the cluster with the elasticvue browser extension.
Cluster status is normal.
All cluster nodes are healthy.
![image-20240420172247845](https://diandiange.oss-cn-beijing.aliyuncs.com/image-20240420172247845.png)
## 3: Proxy and DNS Configuration
#### 1. Proxy Configuration
Note:
Deployment is omitted.
Nginx is used as the proxy here.
Access control is per-user; create the users and passwords yourself with htpasswd (see the sketch after the config).
The configuration file:
```shell
[root@proxy ~]# cat /etc/nginx/conf.d/elasticsearch.conf
server {
    listen 80;
    server_name es.xingdian.com;
    location / {
        auth_basic "xingdiancloud kibana";
        auth_basic_user_file /etc/nginx/pass;
        proxy_pass http://<address>:<port>;
    }
}
```
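The password file referenced by `auth_basic_user_file` can be created with `htpasswd` from httpd-tools; a sketch (the user name `es-admin` is illustrative):
```shell
yum -y install httpd-tools
htpasswd -c /etc/nginx/pass es-admin    # prompts for the password
nginx -t && nginx -s reload             # validate and reload the config
```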
#### 2. DNS Configuration
Note:
Deployment is omitted.
Configuration:
```shell
[root@www ~]# cat /var/named/xingdian.com.zone
$TTL 1D
@       IN SOA  @ rname.invalid. (
                                0       ; serial
                                1D      ; refresh
                                1H      ; retry
                                1W      ; expire
                                3H )    ; minimum
        NS      @
        A       <DNS-server-address>
es      A       <proxy-address>
        AAAA    ::1
```
#### 3. Access Test
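With DNS and the proxy in place, resolution and authenticated access can be checked from a client pointed at the DNS server; a sketch (10.9.12.210 is the DNS server from the table above, `es-admin` the htpasswd user assumed earlier):
```shell
dig es.xingdian.com @10.9.12.210 +short    # should return the proxy address
curl -u es-admin http://es.xingdian.com/   # prompts for the htpasswd password
```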

<h1><center>Deploying Prometheus and Grafana on Kubernetes</center></h1>
Author: Xingdian (unauthorized reproduction will be prosecuted)
------
## 1: Environment Preparation
#### 1. Kubernetes Cluster Healthy
```shell
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 36d v1.23.1
node-1 Ready <none> 36d v1.23.1
node-2 Ready <none> 36d v1.23.1
node-3 Ready <none> 36d v1.23.1
```
#### 2. Harbor Registry Healthy
![image-20220602010601512](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602010601512.png)
## 2: Deploying Prometheus
#### 1. Deploying node-exporter
node-exporter collects metrics from machines (physical servers, VMs, cloud hosts), including CPU, memory, disk, network, and open-file counts.
Create the monitoring namespace:
```shell
[root@master ~]# kubectl create ns monitor-sa
```
Create node-export.yaml:
```shell
[root@master ~]# vim node-export.yaml
apiVersion: apps/v1
kind: DaemonSet # guarantees that an identical pod runs on every node in the cluster
metadata:
  name: node-exporter
  namespace: monitor-sa
  labels:
    name: node-exporter
spec:
  selector:
    matchLabels:
      name: node-exporter
  template:
    metadata:
      labels:
        name: node-exporter
    spec:
      hostPID: true
      hostIPC: true
      hostNetwork: true
      containers:
      - name: node-exporter
        image: prom/node-exporter:v0.16.0
        #image: 10.0.0.230/xingdian/node-exporter:v0.16.0
        ports:
        - containerPort: 9100
        resources:
          requests:
            cpu: 0.15 # the container needs at least 0.15 CPU cores
        securityContext:
          privileged: true # enable privileged mode
        args:
        - --path.procfs
        - /host/proc
        - --path.sysfs
        - /host/sys
        - --collector.filesystem.ignored-mount-points
        - '"^/(sys|proc|dev|host|etc)($|/)"'
        volumeMounts:
        - name: dev
          mountPath: /host/dev
        - name: proc
          mountPath: /host/proc
        - name: sys
          mountPath: /host/sys
        - name: rootfs
          mountPath: /rootfs
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      volumes:
      - name: proc
        hostPath:
          path: /proc
      - name: dev
        hostPath:
          path: /dev
      - name: sys
        hostPath:
          path: /sys
      - name: rootfs
        hostPath:
          path: /
```
Note:
When hostNetwork, hostIPC, and hostPID are all true, the containers in the pod share the host's network namespace, can talk to host processes over IPC, and can see every process running on the host. With hostNetwork: true, the host's port 9100 is exposed directly, so no Service is needed: each host simply serves metrics on its own port 9100.
Create:
```shell
[root@master ~]# kubectl apply -f node-export.yaml
```
Check that node-exporter deployed successfully:
```shell
[root@master ~]# kubectl get pods -n monitor-sa -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
node-exporter-2cbrg 1/1 Running 0 34m 10.0.0.220 master <none> <none>
node-exporter-7rrbh 1/1 Running 0 34m 10.0.0.222 node-2 <none> <none>
node-exporter-96v29 1/1 Running 0 34m 10.0.0.221 node-1 <none> <none>
node-exporter-bf2j8 1/1 Running 0 34m 10.0.0.223 node-3 <none> <none>
```
Note:
node-exporter listens on port 9100 by default; all metrics collected from the current host can be viewed there:
```shell
[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_cpu_seconds
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0# HELP node_cpu_seconds_total Seconds the cpus spent in each mode.
# TYPE node_cpu_seconds_total counter
node_cpu_seconds_total{cpu="0",mode="idle"} 8398.49
node_cpu_seconds_total{cpu="0",mode="iowait"} 1.54
node_cpu_seconds_total{cpu="0",mode="irq"} 0
node_cpu_seconds_total{cpu="0",mode="nice"} 0
node_cpu_seconds_total{cpu="0",mode="softirq"} 17.2
node_cpu_seconds_total{cpu="0",mode="steal"} 0
node_cpu_seconds_total{cpu="0",mode="system"} 70.61
node_cpu_seconds_total{cpu="0",mode="user"} 187.04
node_cpu_seconds_total{cpu="1",mode="idle"} 8403.82
node_cpu_seconds_total{cpu="1",mode="iowait"} 4.95
node_cpu_seconds_total{cpu="1",mode="irq"} 0
node_cpu_seconds_total{cpu="1",mode="nice"} 0
node_cpu_seconds_total{cpu="1",mode="softirq"} 16.75
node_cpu_seconds_total{cpu="1",mode="steal"} 0
node_cpu_seconds_total{cpu="1",mode="system"} 71.26
node_cpu_seconds_total{cpu="1",mode="user"} 190.27
100 74016 100 74016 0 0 5878k 0 --:--:-- --:--:-- --:--:-- 6023k
[root@master ~]# curl http://10.0.0.220:9100/metrics | grep node_load
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0# HELP node_load1 1m load average.
# TYPE node_load1 gauge
node_load1 0.2
# HELP node_load15 15m load average.
# TYPE node_load15 gauge
node_load15 0.22
# HELP node_load5 5m load average.
# TYPE node_load5 gauge
node_load5 0.2
100 74044 100 74044 0 0 8604k 0 --:--:-- --:--:-- --:--:-- 9038k
```
#### 2. Installing Prometheus
Create a service account and grant it RBAC permissions:
```shell
# Create a service account named monitor
[root@master ~]# kubectl create serviceaccount monitor -n monitor-sa
# Bind the monitor service account to the cluster-admin clusterrole via a clusterrolebinding
[root@master ~]# kubectl create clusterrolebinding monitor-clusterrolebinding -n monitor-sa --clusterrole=cluster-admin --serviceaccount=monitor-sa:monitor
```
Create the Prometheus data directory:
```shell
# Prometheus will be scheduled onto node-1
[root@node-1 ~]# mkdir /data && chmod 777 /data
```
Create a ConfigMap to hold the Prometheus configuration:
```shell
[root@master ~]# vim prometheus-cfg.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus-config
  namespace: monitor-sa
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      scrape_timeout: 10s
      evaluation_interval: 1m
    scrape_configs:
    - job_name: 'kubernetes-node'
      kubernetes_sd_configs:
      - role: node
      relabel_configs:
      - source_labels: [__address__]
        regex: '(.*):10250'
        replacement: '${1}:9100'
        target_label: __address__
        action: replace
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
    - job_name: 'kubernetes-node-cadvisor'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
    - job_name: 'kubernetes-apiserver'
      kubernetes_sd_configs:
      - role: endpoints
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name
```
Create:
```shell
[root@master ~]# kubectl apply -f prometheus-cfg.yaml
configmap/prometheus-config created
```
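If `promtool` (shipped with Prometheus) is available somewhere, the embedded config can be syntax-checked before the Deployment picks it up; a hedged sketch that pulls the config back out of the ConfigMap:
```shell
kubectl get configmap prometheus-config -n monitor-sa \
  -o jsonpath='{.data.prometheus\.yml}' > /tmp/prometheus.yml
promtool check config /tmp/prometheus.yml    # expect a SUCCESS line
```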
Configuration explained:
```shell
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus-config
  namespace: monitor-sa
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s     # interval between scrapes of each target
      scrape_timeout: 10s      # scrape timeout, default 10s
      evaluation_interval: 1m  # how often alert rules are evaluated, default 1m
    scrape_configs:            # scrape targets; each is named by job_name; static config or service discovery
    - job_name: 'kubernetes-node'
      kubernetes_sd_configs:   # use Kubernetes service discovery
      - role: node             # node role: discovers every node in the cluster via the default kubelet HTTP port
      relabel_configs:         # relabeling rules
      - source_labels: [__address__]  # original label: the target address
        regex: '(.*):10250'           # match URLs on port 10250
        replacement: '${1}:9100'      # keep the matched IP, swap the port
        target_label: __address__     # the new address becomes <ip>:9100
        action: replace               # replace action
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)  # labels matching this regex are kept; without it only the instance label is shown
    - job_name: 'kubernetes-node-cadvisor'  # scrape the kubelet /metrics/cadvisor endpoint for container resource usage
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - action: labelmap # keep the matched labels
        regex: __meta_kubernetes_node_label_(.+)  # keep labels prefixed with __meta_kubernetes_node_label
      - target_label: __address__                 # the discovered address, e.g. 192.168.40.180:10250
        replacement: kubernetes.default.svc:443   # replace it with kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)                               # capture the node name from the original label
        target_label: __metrics_path__            # set the metrics path
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
        # ${1} is the captured node name, so the final URL becomes
        # https://kubernetes.default.svc:443/api/v1/nodes/<node>/proxy/metrics/cadvisor
    - job_name: 'kubernetes-apiserver'
      kubernetes_sd_configs:
      - role: endpoints  # endpoint discovery; scrapes the apiserver on port 6443
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        # the endpoint's namespace, its service name, and its port name
        action: keep  # keep only instances that match; drop the rest
        regex: default;kubernetes;https  # keep https endpoints of the service named kubernetes in the default namespace
    - job_name: 'kubernetes-service-endpoints'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
        # scrape only endpoints whose service carries the annotation "prometheus.io/scrape: true";
        # annotations are key/value pairs, so the key becomes the source label and when its value
        # matches the regex the target is kept, otherwise it is dropped
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
        # reset the scheme: if the "prometheus.io/scheme" annotation matches the regex,
        # its value replaces __scheme__
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
        # custom metrics path: if an app exposes metrics somewhere other than /metrics,
        # declare e.g. "prometheus.io/path = /mymetrics" on its service and that path is
        # assigned to __metrics_path__; the annotation name must match what the service uses
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        # custom port: join the address with the "prometheus.io/port = <port>" annotation and
        # assign the result to __address__, so Prometheus scrapes the declared port combined
        # with __metrics_path__ (which the previous rule may have customized)
      - action: labelmap # keep the matched labels
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace # rename __meta_kubernetes_namespace to kubernetes_namespace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name
```
Deploy Prometheus with a Deployment:
```shell
[root@master ~]# cat prometheus-deploy.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-server
  namespace: monitor-sa
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      component: server
    #matchExpressions:
    #- {key: app, operator: In, values: [prometheus]}
    #- {key: component, operator: In, values: [server]}
  template:
    metadata:
      labels:
        app: prometheus
        component: server
      annotations:
        prometheus.io/scrape: 'false'
    spec:
      nodeName: node-1 # pin the pod to this node
      serviceAccountName: monitor
      containers:
      - name: prometheus
        image: prom/prometheus:v2.2.1
        #image: 10.0.0.230/xingdian/prometheus:v2.2.1
        imagePullPolicy: IfNotPresent
        command:
        - prometheus
        - --config.file=/etc/prometheus/prometheus.yml
        - --storage.tsdb.path=/prometheus # data directory
        - --storage.tsdb.retention=720h # data retention period
        - --web.enable-lifecycle # enable hot reload
        ports:
        - containerPort: 9090
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/prometheus/prometheus.yml
          name: prometheus-config
          subPath: prometheus.yml
        - mountPath: /prometheus/
          name: prometheus-storage-volume
      volumes:
      - name: prometheus-config
        configMap:
          name: prometheus-config
          items:
          - key: prometheus.yml
            path: prometheus.yml
            mode: 0644
      - name: prometheus-storage-volume
        hostPath:
          path: /data
          type: Directory
```
Create:
```shell
[root@master ~]# kubectl apply -f prometheus-deploy.yaml
deployment.apps/prometheus-server created
```
Check:
```shell
[root@master ~]# kubectl get pods -o wide -n monitor-sa
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
prometheus-server-59cb5d648-bxwrb 1/1 Running 0 14m 10.244.2.100 node-1 <none> <none>
```
#### 3. Creating a Service for the Prometheus Pod
```shell
[root@master ~]# cat prometheus-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: monitor-sa
  labels:
    app: prometheus
spec:
  type: NodePort
  ports:
  - port: 9090
    targetPort: 9090
    protocol: TCP
  selector:
    app: prometheus
    component: server
```
Create:
```shell
[root@master ~]# kubectl apply -f prometheus-svc.yaml
service/prometheus created
```
Check the port the Service maps on the host:
```shell
[root@master ~]# kubectl get svc -n monitor-sa
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
prometheus NodePort 10.106.61.80 <none> 9090:32169/TCP 32m
```
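Prometheus also exposes a health endpoint, so readiness can be confirmed through the NodePort (32169 is randomly assigned; use the port from your own output):
```shell
curl -s http://10.0.0.220:32169/-/healthy    # expect a healthy response
```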
#### 4. Viewing the Web UI
![image-20220602011956600](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602011956600.png)
![image-20220602012012382](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602012012382.png)
#### 5. Prometheus Hot Reload
```shell
# To make config changes take effect without stopping Prometheus, use hot reload:
[root@master ~]# kubectl get pods -n monitor-sa -o wide -l app=prometheus
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
prometheus-server-689fb8cdbc-kcsw2 1/1 Running 0 5m39s 10.244.36.70 k8s-node1 <none> <none>
# Trigger the reload against the pod IP:
[root@master ~]# curl -X POST http://10.244.36.70:9090/-/reload
# Check the logs:
[root@master ~]# kubectl logs -n monitor-sa prometheus-server-689fb8cdbc-kcsw2
```
Note:
```shell
# Hot reload is fairly slow; alternatively, force-restart Prometheus. After editing prometheus-cfg.yaml, delete:
[root@master ~]# kubectl delete -f prometheus-cfg.yaml
[root@master ~]# kubectl delete -f prometheus-deploy.yaml
# Then re-apply:
[root@master ~]# kubectl apply -f prometheus-cfg.yaml
[root@master ~]# kubectl apply -f prometheus-deploy.yaml
# Note: prefer hot reload in production; force deletion can lose monitoring data
```
## 3: Deploying Grafana
#### 1. About Grafana
Grafana is a cross-platform, open-source metrics analysis and visualization tool: it renders collected data visually and notifies alert receivers promptly.
Its main features:
(1) Display: fast, flexible client-side charts with a rich library of dashboard plugins (heatmaps, line charts, graphs, and many other visualizations)
(2) Data sources: Graphite, InfluxDB, OpenTSDB, Prometheus, Elasticsearch, CloudWatch, KairosDB, and more
(3) Notifications: visually define alert rules on your most important metrics; Grafana evaluates them continuously and sends notifications via Slack, PagerDuty, etc. when thresholds are crossed
(4) Mixed display: mix different data sources in the same chart; the data source can be set per query, including custom data sources
(5) Annotations: annotate charts with rich events from different data sources; hovering over an event shows the full event metadata and tags
#### 2. Installing Grafana
```shell
[root@master prome]# cat grafana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      task: monitoring
      k8s-app: grafana
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: 10.0.0.230/xingdian/heapster-grafana-amd64:v5.0.4
        #heleicool/heapster-grafana-amd64:v5.0.4
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        # The following env variables are required to make Grafana accessible via
        # the kubernetes api-server proxy. On production clusters, we recommend
        # removing these env variables, setup auth for grafana, and expose the grafana
        # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  # type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
  type: NodePort
```
Create:
```shell
[root@master prome]# kubectl apply -f grafana.yaml
deployment.apps/monitoring-grafana created
service/monitoring-grafana created
```
Check:
```shell
[root@master prome]# kubectl get pods -n kube-system -l task=monitoring -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
monitoring-grafana-7c5c6c7486-rbt62 1/1 Running 0 9s 10.244.1.83 node-3 <none> <none>
```
```shell
[root@master prome]# kubectl get svc -n kube-system | grep grafana
monitoring-grafana NodePort 10.101.77.194 <none> 80:30919/TCP 76s
```
## 4: Configuring Grafana
Browser access:
![image-20220602013222284](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013222284.png)
Add a data source:
![image-20220602013322234](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013322234.png)
Specify the Prometheus address:
![image-20220602013441712](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013441712.png)
Import a dashboard template:
![image-20220602013943317](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602013943317.png)
![image-20220602014027197](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014027197.png)
Note:
Official dashboards can be downloaded from https://grafana.com/dashboards?dataSource=prometheus&search=kubernetes
![image-20220602014152927](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014152927.png)
![image-20220602014212551](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014212551.png)
Result:
![image-20220602014306247](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014306247.png)
![image-20220602014321106](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014321106.png)
![image-20220602014337431](%E5%9F%BA%E4%BA%8Ekubernetes%E9%83%A8%E7%BD%B2Prometheus%E5%92%8CGrafana.assets/image-20220602014337431.png)