All of Xinchen's original articles (with companion source code) are categorized and indexed here: https://github.com/zq2599/blog_demos
sudo yum install openssh-server -y
sudo passwd root
yum install redhat-lsb lrzsz wget -y
systemctl stop firewalld && \
systemctl disable firewalld && \
setenforce 0
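Note that setenforce 0 only switches SELinux to permissive mode until the next reboot; to make the change survive reboots, also update the SELinux config file (assuming the default path /etc/selinux/config):

# Persist permissive mode across reboots
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config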
tar -zxvf sealos_4.1.3_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin
sealos run labring/kubernetes-docker:v1.22.8-4.1.3 labring/helm:v3.8.2 labring/calico:v3.24.1 --single
[root@VM-12-12-centos k8s]# kubectl get pods -A
NAMESPACE          NAME                                       READY   STATUS    RESTARTS   AGE
calico-apiserver   calico-apiserver-67b7856948-bg2wh          1/1     Running   0          12s
calico-apiserver   calico-apiserver-67b7856948-fz64n          1/1     Running   0          12s
calico-system      calico-kube-controllers-78687bb75f-z2r7m   1/1     Running   0          41s
calico-system      calico-node-l6nmw                          1/1     Running   0          41s
calico-system      calico-typha-b46ff96f6-qqzxb               1/1     Running   0          41s
calico-system      csi-node-driver-lv2g2                      2/2     Running   0          21s
kube-system        coredns-78fcd69978-57r7x                   1/1     Running   0          47s
kube-system        coredns-78fcd69978-psjcs                   1/1     Running   0          46s
kube-system        etcd-vm-12-12-centos                       1/1     Running   0          60s
kube-system        kube-apiserver-vm-12-12-centos             1/1     Running   0          61s
kube-system        kube-controller-manager-vm-12-12-centos    1/1     Running   0          62s
kube-system        kube-proxy-x8nhg                           1/1     Running   0          47s
kube-system        kube-scheduler-vm-12-12-centos             1/1     Running   0          60s
tigera-operator    tigera-operator-6f669b6c4f-t8t9h           1/1     Running   0          46s
kubectl taint nodes --all node-role.kubernetes.io/master-
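Removing this taint is what allows ordinary workloads to run on the single control-plane node; you can confirm that no taints remain with:

# Print each node's name and any remaining taints (blank means schedulable)
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.taints}{"\n"}{end}'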
For an offline installation, upload the sealos package along with the saved image files (kubernetes.tar, helm.tar, calico.tar) to the server, then install sealos the same way and load the images:
tar zxvf sealos_4.1.3_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin
sealos load -i kubernetes.tar
sealos load -i helm.tar
sealos load -i calico.tar
sealos run labring/kubernetes-docker:v1.22.8-4.1.3 labring/helm:v3.8.2 labring/calico:v3.24.1 --single
Verification is the same as for the online installation: run kubectl get pods -A and wait until every pod reports Running, then allow workloads to be scheduled on this single node by removing the master taint:
kubectl taint nodes --all node-role.kubernetes.io/master-
docker load < busybox.tar
docker load < local-pv.tar
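These two images back the rancher local-path-provisioner (the provisioner itself, plus the busybox helper image it uses to prepare volume directories), which supplies the local-path StorageClass used by the persistent volumes later in this article. If it is not deployed yet, the upstream manifest can be applied as below; an offline host would apply a locally saved copy of the same file instead:

# Deploy local-path-provisioner from the upstream repository
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
# Confirm the StorageClass is available
kubectl get storageclass local-path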
docker pull docker.io/willdockerhub/prometheus-adapter:v0.9.1
docker tag docker.io/willdockerhub/prometheus-adapter:v0.9.1 k8s.gcr.io/prometheus-adapter/prometheus-adapter:v0.9.1
docker pull bolingcavalry/kube-state-metrics:v2.3.0
docker tag bolingcavalry/kube-state-metrics:v2.3.0 k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.3.0
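These pull-and-tag pairs work around k8s.gcr.io being unreachable from some networks: each image is fetched from an accessible mirror, then retagged to the exact name the kube-prometheus manifests reference. The same pattern applies to any other image that fails to pull; a generic sketch, where the angle-bracketed names are placeholders:

# <mirror-repo>/<image>:<tag> stands for any accessible mirror copy
docker pull <mirror-repo>/<image>:<tag>
# Retag it to the name the manifest expects so no YAML edits are needed
docker tag <mirror-repo>/<image>:<tag> k8s.gcr.io/<path>/<image>:<tag>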
Next, make a few changes to the downloaded configuration files.
First, modify Grafana's service file. The default service type is ClusterIP, which cannot be reached from outside the Kubernetes cluster, so change it so the service is accessible from outside as well: open kube-prometheus-0.10.0/manifests/grafana-service.yaml and add the two lines shown in the sketch below. Note that port 31330 is simply an available port I picked; any port in the 30000-32767 range will do.
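For reference, here is a minimal sketch of the edited grafana-service.yaml; the two commented lines are the additions, and the remaining fields follow the stock 0.10.0 manifest (labels abbreviated):

apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: grafana
    app.kubernetes.io/name: grafana
    app.kubernetes.io/part-of: kube-prometheus
  name: grafana
  namespace: monitoring
spec:
  type: NodePort            # added: expose the service on every node
  ports:
  - name: http
    port: 3000
    targetPort: http
    nodePort: 31330         # added: any free port in 30000-32767
  selector:
    app.kubernetes.io/component: grafana
    app.kubernetes.io/name: grafana
    app.kubernetes.io/part-of: kube-prometheus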
If you want to reach Prometheus from outside the cluster as well, modify prometheus-service.yaml using the same approach.
Also, if like me you deploy on a cloud server (Tencent Cloud in my case), access will go through a public IP. Look for the file grafana-networkPolicy.yaml and delete it if it exists (the 0.10.0 release does not ship this file); if it is left in place, Grafana cannot be reached via the public IP.
The same goes for Prometheus: to reach Prometheus via the public IP, delete prometheus-networkPolicy.yaml.
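Assuming the manifests live under kube-prometheus-0.10.0/manifests, the cleanup can be done in one line (rm -f stays silent if the files do not exist):

# Remove the NetworkPolicies that would block access from outside the cluster
rm -f kube-prometheus-0.10.0/manifests/grafana-networkPolicy.yaml \
      kube-prometheus-0.10.0/manifests/prometheus-networkPolicy.yaml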
Next, give Prometheus persistent storage: in prometheus-prometheus.yaml, add a storage section under spec that requests a volume from the local-path StorageClass:

  storage:
    volumeClaimTemplate:
      spec:
        storageClassName: local-path
        resources:
          requests:
            storage: 10Gi
Grafana also needs persistent storage; define a PersistentVolumeClaim for it in the monitoring namespace:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: grafana
  namespace: monitoring
spec:
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
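For the claim to be used, the Grafana deployment has to mount it in place of the default emptyDir. A sketch of the corresponding change in grafana-deployment.yaml (the volume name grafana-storage follows the stock manifest):

      volumes:
      - name: grafana-storage
        persistentVolumeClaim:
          claimName: grafana    # the PVC defined above; was: emptyDir: {}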
The stock prometheus-clusterRole.yaml only grants Prometheus access to node metrics; extend it so Prometheus can also discover services, endpoints, and pods:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 2.32.1
  name: prometheus-k8s
  namespace: monitoring
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
- nonResourceURLs:
  - /metrics
  verbs:
  - get
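Once the manifests have been applied (the kubectl apply steps below), a quick way to confirm the expanded permissions took effect is kubectl auth can-i, impersonating the Prometheus ServiceAccount:

# Should print "yes" if the ClusterRole change is active
kubectl auth can-i list pods -A \
  --as=system:serviceaccount:monitoring:prometheus-k8s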
Also grant the kube-state-metrics ServiceAccount sufficient permissions (cluster-admin is heavy-handed, but acceptable for a development environment):

kubectl create clusterrolebinding kube-state-metrics-admin-binding \
--clusterrole=cluster-admin \
--user=system:serviceaccount:monitoring:kube-state-metrics
Deploy kube-prometheus in two steps. First apply the setup manifests, which create the monitoring namespace and the CRDs:

kubectl apply --server-side -f manifests/setup

If the second step runs before setup has finished, it fails with errors such as:

Error from server (NotFound): namespaces "monitoring" not found

Once setup completes, apply the main manifests:

kubectl apply -f manifests/
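It takes a little while for all the workloads to come up; watching the monitoring namespace shows the progress:

# Watch until every pod in the monitoring namespace is Running
kubectl get pods -n monitoring -w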
On the host you can see that local-path-provisioner has created a backing directory for each PersistentVolumeClaim:

[root@VM-12-12-centos local-path-provisioner]# pwd
/opt/local-path-provisioner
[root@VM-12-12-centos local-path-provisioner]# ls -al
total 20
drwxr-xr-x 5 root root 4096 Nov 5 10:02 .
drwxr-xr-x. 7 root root 4096 Oct 31 08:29 ..
drwxrwxrwx 3 root root 4096 Nov 5 10:06 pvc-0b8b93ae-944c-4433-a904-71c00d65c8ad_monitoring_prometheus-k8s-db-prometheus-k8s-0
drwxrwxrwx 6 root root 4096 Nov 5 10:53 pvc-9a5dccda-673f-4db8-a64e-baf4df5006cf_monitoring_grafana
drwxrwxrwx 3 root root 4096 Nov 5 10:06 pvc-bccf40a7-59a5-4472-a6a3-65f4898255d6_monitoring_prometheus-k8s-db-prometheus-k8s-1
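Grafana should now answer on the node port chosen earlier (31330 in this walkthrough); a fresh kube-prometheus Grafana logs in with admin/admin and prompts for a new password. A quick reachability check, with <node-public-ip> as a placeholder for your server's address:

# Expect an HTTP response (e.g. a 302 redirect to the login page)
curl -I http://<node-public-ip>:31330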
At this point the k8s + pv + prometheus + grafana environment is complete. It is still a long way from production grade, but it is good enough for day-to-day development and testing. If you are interested in cloud native and want a simple, quick way to build an environment from scratch for learning and development, I hope this article provides some useful reference.
Some of the steps in this article were drawn from this expert's post, for which I am grateful: http://www.javajun.net/posts/5019/index.html