[root@k8s-master ansible]# kubectl get svc -n kube-system    # view the services
NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
heapster               ClusterIP   10.68.29.48    <none>        80/TCP                   64s
kube-dns               ClusterIP   10.68.0.2      <none>        53/UDP,53/TCP,9153/TCP   71s
kubernetes-dashboard   NodePort    10.68.117.7    <none>        443:24190/TCP            64s
metrics-server         ClusterIP   10.68.107.56   <none>        443/TCP                  69s
[root@k8s-master ansible]# kubectl cluster-info    # view cluster info
Kubernetes master is running at https://192.168.56.110:8443
CoreDNS is running at https://192.168.56.110:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
kubernetes-dashboard is running at https://192.168.56.110:8443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master ~]# helm init
Creating /root/.helm
Creating /root/.helm/repository
Creating /root/.helm/repository/cache
Creating /root/.helm/repository/local
Creating /root/.helm/plugins
Creating /root/.helm/starters
Creating /root/.helm/cache/archive
Creating /root/.helm/repository/repositories.yaml
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
Adding local repo with URL: http://127.0.0.1:8879/charts
$HELM_HOME has been configured at /root/.helm.
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
Happy Helming!
[root@k8s-master ~]# helm search
NAME                           CHART VERSION   APP VERSION   DESCRIPTION
stable/acs-engine-autoscaler   2.1.3           2.1.1         Scales worker nodes within agent pools
stable/aerospike               0.1.7           v3.14.1.2     A Helm chart for Aerospike in Kubernetes
stable/anchore-engine          0.1.3           0.1.6         Anchore container analysis and policy evaluatio...
......
[root@k8s-master helm]# helm repo update    # updating the repos sometimes fails with a connection error
Hang tight while we grab the latest from your chart repositories...
...Skip local chart repository
...Unable to get an update from the "stable" chart repository (https://kubernetes-charts.storage.googleapis.com): Get https://kubernetes-charts.storage.googleapis.com/index.yaml: dial tcp 216.58.220.208:443: connect: connection refused
Update Complete. ⎈ Happy Helming!⎈
[root@k8s-master helm]# helm repo list
NAME     URL
stable   https://kubernetes-charts.storage.googleapis.com
local    http://127.0.0.1:8879/charts
[root@k8s-master helm]# helm repo remove stable    # remove the stable repo
"stable" has been removed from your repositories
[root@k8s-master helm]# helm repo list
NAME    URL
local   http://127.0.0.1:8879/charts
# Add the Alibaba Cloud charts repository
[root@k8s-master helm]# helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
"stable" has been added to your repositories
[root@k8s-master helm]# helm repo list
NAME     URL
local    http://127.0.0.1:8879/charts
stable   https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
[root@k8s-master helm]# helm repo update    # update the repos again
Hang tight while we grab the latest from your chart repositories...
...Skip local chart repository
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈ Happy Helming!⎈
Like yum, helm also supports keyword search:
[root@k8s-master ~]# helm search mysql
NAME                            CHART VERSION   APP VERSION   DESCRIPTION
stable/mysql                    0.3.5                         Fast, reliable, scalable, and easy to use open-...
stable/percona                  0.3.0                         free, fully compatible, enhanced, open source d...
stable/percona-xtradb-cluster   0.0.2           5.7.19        free, fully compatible, enhanced, open source d...
stable/gcloud-sqlproxy          0.2.3                         Google Cloud SQL Proxy
stable/mariadb                  2.1.6           10.1.31       Fast, reliable, scalable, and easy to use open-...
3. Connect using the mysql cli, then provide your password:
   $ mysql -h reeling-bronco-mysql -p

To connect to your database directly from outside the K8s cluster:
    MYSQL_HOST=127.0.0.1
    MYSQL_PORT=3306

    # Execute the following commands to route the connection:
    export POD_NAME=$(kubectl get pods --namespace default -l "app=reeling-bronco-mysql" -o jsonpath="{.items[0].metadata.name}")
    kubectl port-forward $POD_NAME 3306:3306

    mysql -h ${MYSQL_HOST} -P${MYSQL_PORT} -u root -p${MYSQL_ROOT_PASSWORD}
The output is divided into three parts:
① Descriptive information about this deployment of the chart:
NAME is the name of the release. Because we did not specify one with the -n option, Helm generated a random one, in this case reeling-bronco.
② The resources contained in the current release: a Service, a Deployment, a Secret, and a PersistentVolumeClaim, all named reeling-bronco-mysql, following the format ReleaseName-ChartName.
③ The NOTES section shows how to use the release, for example how to access the Service, how to retrieve the database password, and how to connect to the database (a sketch of retrieving the password follows below).
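The NOTES above reference MYSQL_ROOT_PASSWORD without showing how to obtain it. A minimal sketch of reading it from the release Secret, assuming the chart stores the root password under the key mysql-root-password (the exact key name is an assumption):

# Assumption: the root password lives in the release Secret under the key "mysql-root-password"
MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace default reeling-bronco-mysql \
  -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo)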
The individual objects that make up the release can be viewed with kubectl get:
[root@k8s-master helm]# kubectl get service reeling-bronco-mysql
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
reeling-bronco-mysql   ClusterIP   10.99.245.169   <none>        3306/TCP   3m

[root@k8s-master helm]# kubectl get deployment reeling-bronco-mysql
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
reeling-bronco-mysql   1         1         1            0           3m

[root@k8s-master helm]# kubectl get pvc reeling-bronco-mysql
NAME                   STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
reeling-bronco-mysql   Pending                                                     4m

[root@k8s-master helm]# kubectl get secret reeling-bronco-mysql
NAME                   TYPE     DATA   AGE
reeling-bronco-mysql   Opaque   2      4m
Because we have not prepared a PersistentVolume yet, the current release is not usable.
helm list shows the releases that have been deployed, and helm delete removes a release (see the example after the listing below).
[root@k8s-master helm]# helm list
NAME             REVISION   UPDATED                    STATUS     CHART         NAMESPACE
reeling-bronco   1          Wed Mar 27 03:10:31 2019   DEPLOYED   mysql-0.3.5   default
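A quick sketch of deleting the release shown above, assuming Helm 2 as used throughout this walkthrough (--purge is optional):

helm delete reeling-bronco            # delete the release but keep its revision history
helm delete reeling-bronco --purge    # or remove the release and its record entirely, freeing the name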
② Chart and Release are objects predefined by Helm; each has its own attributes that can be used in templates. For example, if the chart is installed with the following command:
[root@k8s-master templates]# helm search stable/mysql
NAME           CHART VERSION   APP VERSION   DESCRIPTION
stable/mysql   0.3.5                         Fast, reliable, scalable, and easy to use open-...
[root@k8s-master templates]# helm install stable/mysql -n my
The output (for instance from helm inspect values stable/mysql) is in fact the contents of values.yaml. Reading the comments tells you which parameters the MySQL chart supports and what needs to be prepared before installation. One part of it concerns storage:
## Persist data to a persistent volume
persistence:
  enabled: true
  ## database data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner.  (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  accessMode: ReadWriteOnce
  size: 8Gi
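The mysql-pv.yaml applied next is not shown in the text. A minimal sketch of what such a PersistentVolume could look like, matching the 8Gi / ReadWriteOnce requirements above (the hostPath location is an assumption for illustration):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv2
spec:
  capacity:
    storage: 8Gi               # matches the chart's default size
  accessModes:
    - ReadWriteOnce            # matches the chart's default accessMode
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /data/mysql-pv2      # assumption: any node-local directory is enough for a demo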
[root@k8s-master volumes]# kubectl apply -f mysql-pv.yaml
persistentvolume/mysql-pv2 created
[root@k8s-master volumes]# kubectl get pv
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
mysql-pv2   8Gi        RWO            Retain           Available                                   5s
[root@k8s-master ~]# helm upgrade --set imageTag=5.7.15 my stable/mysql
Release "my" has been upgraded. Happy Helming!
LAST DEPLOYED: Tue Oct 30 23:42:36 2018
......
[root@k8s-master ~]# kubectl get deployment my-mysql -o wide
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
my-mysql   1         1         1            0           11m   my-mysql     mysql:5.7.15   app=my-mysql
helm history shows all revisions of a release, and helm rollback rolls the release back to any revision.
[root@k8s-master ~]# helm history my
REVISION   UPDATED                    STATUS       CHART         DESCRIPTION
1          Tue Oct 30 23:31:42 2018   SUPERSEDED   mysql-0.3.5   Install complete
2          Tue Oct 30 23:42:36 2018   DEPLOYED     mysql-0.3.5   Upgrade complete
[root@k8s-master ~]# helm rollback my 1
Rollback was a success! Happy Helming!

The rollback succeeded and MySQL is back on 5.7.14.
[root@k8s-master ~]# helm history my
REVISION   UPDATED                    STATUS       CHART         DESCRIPTION
1          Tue Oct 30 23:31:42 2018   SUPERSEDED   mysql-0.3.5   Install complete
2          Tue Oct 30 23:42:36 2018   SUPERSEDED   mysql-0.3.5   Upgrade complete
3          Tue Oct 30 23:44:28 2018   DEPLOYED     mysql-0.3.5   Rollback to 1
[root@k8s-master ~]# kubectl get deployment my-mysql -o wide
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
my-mysql   1         1         1            1           13m   my-mysql     mysql:5.7.14   app=my-mysql
[root@k8s-master ~]# helm install mychart
NAME:   anxious-wasp
LAST DEPLOYED: Wed Oct 31 01:57:15 2018
NAMESPACE: default
STATUS: DEPLOYED

RESOURCES:
==> v1/Pod(related)
NAME                                  READY   STATUS              RESTARTS   AGE
anxious-wasp-mychart-94fcbf7d-dg5qn   0/1     ContainerCreating   0          0s

==> v1/Service
NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
anxious-wasp-mychart   ClusterIP   10.111.51.71   <none>        80/TCP    0s

==> v1beta2/Deployment
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
anxious-wasp-mychart   1         1         1            0           0s

NOTES:
1. Get the application URL by running these commands:
  export POD_NAME=$(kubectl get pods --namespace default -l "app=mychart,release=anxious-wasp" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
Once the chart has been deployed to the Kubernetes cluster, it can be tested much more thoroughly.
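A minimal smoke test following the NOTES above, assuming mychart was scaffolded with helm create (whose default deployment runs nginx); the curl check is illustrative, not part of the chart itself:

export POD_NAME=$(kubectl get pods --namespace default -l "app=mychart,release=anxious-wasp" -o jsonpath="{.items[0].metadata.name}")
kubectl port-forward $POD_NAME 8080:80 &    # forward local port 8080 to the pod
sleep 2
curl -I http://127.0.0.1:8080               # an HTTP 200 indicates the release is serving traffic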
(4) Add the chart to a repository
Once the chart passes testing, it can be added to a repository so that other team members can use it. Any HTTP server can serve as a chart repository; the following demonstrates setting one up on k8s-node1 (192.168.56.12).
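A rough sketch of what that setup might look like; the directory layout and the choice of HTTP server are assumptions, only the resulting URL http://192.168.56.12:8080/charts is given in the text:

# On k8s-master: package the chart and generate the repository index
helm package mychart                                              # produces mychart-0.1.0.tgz in the current directory
mkdir -p charts && mv mychart-0.1.0.tgz charts/
helm repo index charts/ --url http://192.168.56.12:8080/charts    # writes charts/index.yaml

# Copy charts/ to k8s-node1 and serve it on port 8080 with any static HTTP server
# whose document root exposes it as /charts
scp -r charts/ 192.168.56.12:/var/www/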
(5) Add the new repository to Helm with helm repo add.
[root@k8s-master ~]# helm repo add newrepo http://192.168.56.12:8080/charts
"newrepo" has been added to your repositories
[root@k8s-master ~]# helm repo list
NAME      URL
local     http://127.0.0.1:8879/charts
stable    https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
newrepo   http://192.168.56.12:8080/charts
(6) mychart can now be found with helm search.
[root@k8s-master ~]# helm search mychart
NAME              CHART VERSION   APP VERSION   DESCRIPTION
local/mychart     0.1.0           1.0           A Helm chart for Kubernetes
newrepo/mychart   0.1.0           1.0           A Helm chart for Kubernetes
Besides newrepo/mychart there is also a local/mychart. That is because when the chart was packaged in step 2, mychart was also synced to the local repository.
(8) If new charts are added to the repository later, run helm repo update to refresh the local index.
[root@k8s-master ~]# helm repo update
Hang tight while we grab the latest from your chart repositories...
...Skip local chart repository
...Successfully got an update from the "newrepo" chart repository
...Successfully got an update from the "stable" chart repository
Update Complete. ⎈ Happy Helming!⎈
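From here the published chart can be installed from the new repository like any other chart; a sketch (omitting a release name and custom values is an assumption):

helm install newrepo/mychart    # install the chart straight from the team repository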
[root@k8s-master k8s-prom]# kubectl get all -n prom
NAME                                    READY   STATUS    RESTARTS   AGE
pod/prometheus-node-exporter-6srrq      1/1     Running   0          11m
pod/prometheus-node-exporter-fftmc      1/1     Running   0          11m
pod/prometheus-node-exporter-qlr8d      1/1     Running   0          11m
pod/prometheus-server-66cbd4c6b-j9lqr   1/1     Running   0          4m

NAME                               TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
service/prometheus                 NodePort    10.96.65.72   <none>        9090:30090/TCP   10m
service/prometheus-node-exporter   ClusterIP   None          <none>        9100/TCP         11m

NAME                                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/prometheus-node-exporter   3         3         3       3            3           <none>          11m

NAME                                DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/prometheus-server   1         1         1            1           10m

NAME                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/prometheus-server-65f5d59585   0         0         0       10m
replicaset.apps/prometheus-server-66cbd4c6b    1         1         1       4m
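Since the prometheus Service is exposed as a NodePort on 30090, the UI can be reached through any node's IP; a quick check, using k8s-node1's address from earlier in this walkthrough:

curl -I http://192.168.56.12:30090/graph    # any cluster node IP works for a NodePort Service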
2.4 Deploy kube-state-metrics
[root@k8s-master k8s-prom]# kubectl apply -f kube-state-metrics/
deployment.apps/kube-state-metrics created
serviceaccount/kube-state-metrics created
clusterrole.rbac.authorization.k8s.io/kube-state-metrics created
clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created
service/kube-state-metrics created
[root@k8s-master k8s-prom]# kubectl apply -f k8s-prometheus-adapter/
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/custom-metrics-auth-reader created
deployment.apps/custom-metrics-apiserver created
clusterrolebinding.rbac.authorization.k8s.io/custom-metrics-resource-reader created
serviceaccount/custom-metrics-apiserver created
service/custom-metrics-apiserver created
apiservice.apiregistration.k8s.io/v1beta1.custom.metrics.k8s.io created
clusterrole.rbac.authorization.k8s.io/custom-metrics-server-resources created
clusterrole.rbac.authorization.k8s.io/custom-metrics-resource-reader created
clusterrolebinding.rbac.authorization.k8s.io/hpa-controller-custom-metrics created
configmap/adapter-config created
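Once the adapter has registered v1beta1.custom.metrics.k8s.io, the API group should be queryable; a quick verification (the metrics actually listed depend on what Prometheus is scraping):

kubectl get apiservice v1beta1.custom.metrics.k8s.io               # should report Available=True
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | head     # dumps the available custom metrics as JSON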
[root@k8s-master k8s-prom]# cat grafana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: prom            # namespace changed to prom
spec:
  replicas: 1
  selector:
    matchLabels:
      task: monitoring
      k8s-app: grafana
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-grafana-amd64:v5.0.4
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ca-certificates
          readOnly: true
        - mountPath: /var
          name: grafana-storage
        env:
        # This manifest reuses the grafana config from the original heapster addon,
        # so the InfluxDB environment variable must be commented out:
        #- name: INFLUXDB_HOST
        #  value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
          # The following env variables are required to make Grafana accessible via
          # the kubernetes api-server proxy. On production clusters, we recommend
          # removing these env variables, setup auth for grafana, and expose the grafana
          # service using a LoadBalancer or a public IP.
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          # If you're only using the API Server proxy, set this value instead:
          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
          value: /
      volumes:
      - name: ca-certificates
        hostPath:
          path: /etc/ssl/certs
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: prom
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP.
  # type: LoadBalancer
  # You could also use NodePort to expose the service at a randomly-generated port
  type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
[root@k8s-master k8s-prom]# kubectl apply -f grafana.yaml
deployment.apps/monitoring-grafana created
service/monitoring-grafana created
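Because the monitoring-grafana Service is of type NodePort, the allocated port can be looked up and the dashboard opened on any node IP; a quick sketch of the check (the actual port is whatever Kubernetes assigns):

kubectl get svc monitoring-grafana -n prom    # note the port mapped to 80, e.g. 80:3XXXX/TCP
# then browse to http://<any-node-ip>:<nodePort>, for example on k8s-node1 (192.168.56.12)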