```
[root@k8s-master k8s-prom]# kubectl get all -n prom
NAME                                    READY   STATUS    RESTARTS   AGE
pod/prometheus-node-exporter-6srrq      1/1     Running   0          11m
pod/prometheus-node-exporter-fftmc      1/1     Running   0          11m
pod/prometheus-node-exporter-qlr8d      1/1     Running   0          11m
pod/prometheus-server-66cbd4c6b-j9lqr   1/1     Running   0          4m

NAME                               TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
service/prometheus                 NodePort    10.96.65.72   <none>        9090:30090/TCP   10m
service/prometheus-node-exporter   ClusterIP   None          <none>        9100/TCP         11m

NAME                                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/prometheus-node-exporter   3         3         3       3            3           <none>          11m

NAME                                DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/prometheus-server   1         1         1            1           10m

NAME                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/prometheus-server-65f5d59585   0         0         0       10m
replicaset.apps/prometheus-server-66cbd4c6b    1         1         1       4m
```
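With all pods Running, a quick smoke test can be done from outside the cluster. A minimal sketch, assuming 192.168.56.11 stands in for one of your node IPs (a hypothetical address; substitute your own):

```bash
# node-exporter publishes host metrics on port 9100 of every node,
# exposed through the headless service above:
curl -s http://192.168.56.11:9100/metrics | head

# Prometheus itself listens on NodePort 30090; an instant query for the
# "up" metric should return every scrape target the server currently sees:
curl -s 'http://192.168.56.11:30090/api/v1/query?query=up'
```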
2.4. Deploying kube-state-metrics

kube-state-metrics watches the API server and turns the state of Kubernetes objects (Deployments, Pods, DaemonSets, and so on) into Prometheus metrics:
```
[root@k8s-master k8s-prom]# kubectl apply -f kube-state-metrics/
deployment.apps/kube-state-metrics created
serviceaccount/kube-state-metrics created
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
service/kube-state-metrics created
```
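Before moving on, it is worth checking that kube-state-metrics actually answers scrapes. A minimal sketch; port 8080 is the kube-state-metrics default, and the service address assumes the manifests place it in the prom namespace, so verify both against the files in kube-state-metrics/ first:

```bash
# Wait for the deployment to finish rolling out:
kubectl -n prom rollout status deployment/kube-state-metrics

# Scrape /metrics once from inside the cluster (8080 is the
# kube-state-metrics default port -- an assumption here):
kubectl -n prom run kms-test --rm -it --image=busybox --restart=Never -- \
  wget -qO- http://kube-state-metrics.prom.svc:8080/metrics | head
```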
Next, deploy k8s-prometheus-adapter, which registers the custom.metrics.k8s.io API group and lets consumers such as the HPA query Prometheus metrics through the API server:

```
[root@k8s-master k8s-prom]# kubectl apply -f k8s-prometheus-adapter/
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/custom-metrics-auth-reader created
deployment.apps/custom-metrics-apiserver created
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics-resource-reader created
serviceaccount/custom-metrics-apiserver created
service/custom-metrics-apiserver created
apiservice.apiregistration.k8s.io/v1beta1.custom.metrics.k8s.io created
clusterrole.rbac.authorization.k8s.io/custom-metrics-server-resources created
clusterrole.rbac.authorization.k8s.io/custom-metrics-resource-reader created
clusterrolebinding.rbac.authorization.k8s.io/hpa-controller-custom-metrics created
configmap/adapter-config created
```
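Once the custom-metrics-apiserver pod is up, the newly registered API group can be checked directly through kubectl:

```bash
# The APIService created above should eventually report as Available:
kubectl get apiservice v1beta1.custom.metrics.k8s.io

# Print (part of) the raw discovery document for the metrics it exposes:
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | head -c 500
```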
Finally, deploy Grafana as the dashboard front end. The manifest below reuses the Grafana image from the old Heapster add-on, with the namespace changed to prom and the InfluxDB environment variable commented out:

```yaml
[root@k8s-master k8s-prom]# cat grafana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: prom    # changed to the prom namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      task: monitoring
      k8s-app: grafana
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v5.0.4
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        # This reuses the Grafana config from the old Heapster add-on,
        # so the InfluxDB environment variable must be commented out:
        #- name: INFLUXDB_HOST
        #  value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        # The following env variables are required to make Grafana accessible via
        # the kubernetes api-server proxy. On production clusters, we recommend
        # removing these env variables, setup auth for grafana, and expose the grafana
        # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: prom
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
```
```
[root@k8s-master k8s-prom]# kubectl apply -f grafana.yaml
deployment.apps/monitoring-grafana created
service/monitoring-grafana created
```
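Because the Service type is NodePort with no fixed port, Kubernetes assigns one at random; look it up before opening the UI. A short sketch:

```bash
# Print the NodePort that was assigned to Grafana:
kubectl get svc monitoring-grafana -n prom \
  -o jsonpath='{.spec.ports[0].nodePort}'

# Grafana is then reachable at http://<any-node-ip>:<that-port>/ ;
# anonymous access is enabled (GF_AUTH_ANONYMOUS_ENABLED=true),
# so no login is required.
```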