Environment preparation

Linux system

[root@master-01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.41 master-01
192.168.1.42 master-02
192.168.1.43 master-03
[root@master-01 ~]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core) 

Create the certificates

Install cfssl

yum install wget
mkdir -p /opt/local/cfssl
cd /opt/local/cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo
chmod +x *
[root@master-01 cfssl]# ls -l
total 18808
-rwxr-xr-x 1 root root 10376657 Mar 30 2016 cfssl
-rwxr-xr-x 1 root root  6595195 Mar 30 2016 cfssl-certinfo
-rwxr-xr-x 1 root root  2277873 Mar 30 2016 cfssljson

Create the CA certificate configuration

mkdir /opt/ssl
cd /opt/ssl
# config.json file
vi config.json
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "87600h"
      }
    }
  }
}
# csr.json file
vi csr.json
{
  "CN": "kubernetes",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "ShenZhen", "L": "ShenZhen", "O": "k8s", "OU": "System" }
  ]
}

Generate the CA certificate and private key

[root@master-01 ssl]# /opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca
2018/04/29 18:07:11 [INFO] generating a new CA key and certificate from CSR
2018/04/29 18:07:11 [INFO] generate received request
2018/04/29 18:07:11 [INFO] received CSR
2018/04/29 18:07:11 [INFO] generating key: rsa-2048
2018/04/29 18:07:11 [INFO] encoded CSR
2018/04/29 18:07:11 [INFO] signed certificate with serial number 291408426601725634356434729258994816542793925675
[root@master-01 ssl]# ls -l
total 20
-rw-r--r-- 1 root root 1005 Apr 29 18:07 ca.csr
-rw------- 1 root root 1675 Apr 29 18:07 ca-key.pem
-rw-r--r-- 1 root root 1363 Apr 29 18:07 ca.pem
-rw-r--r-- 1 root root  292 Apr 29 18:05 config.json
-rw-r--r-- 1 root root  210 Apr 29 18:05 csr.json
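To double-check what was just generated, the CA can be inspected with the cfssl-certinfo binary downloaded earlier, or with openssl (an optional check, my addition, not part of the original write-up):

/opt/local/cfssl/cfssl-certinfo -cert ca.pem
# or, with openssl:
openssl x509 -in ca.pem -noout -subject -dates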

Distribute the certificates (run on every node)

# Create the certificate directory
mkdir -p /etc/kubernetes/ssl
# Copy all of the files into the directory
cp *.pem /etc/kubernetes/ssl
cp ca.csr /etc/kubernetes/ssl
# Copy the CA files to the other nodes
scp * root@192.168.1.42:/etc/kubernetes/ssl
scp * root@192.168.1.43:/etc/kubernetes/ssl

Install Docker

yum install docker -y

Add registry mirrors (accelerators)

vi /etc/docker/daemon.json
{"registry-mirrors": ["http://579fe187.m.daocloud.io","https://pee6w651.mirror.aliyuncs.com"]}

Start Docker

systemctl enable docker
systemctl start docker
systemctl status docker
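To confirm the mirror configuration was picked up, docker info should list the mirrors (an optional check, my addition):

docker info | grep -A 2 "Registry Mirrors"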

Install the etcd cluster

Run this on every node where etcd will be installed:

yum install etcd3 -y

Create the etcd certificates

cd /opt/ssl/
vi etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.41",
    "192.168.1.42",
    "192.168.1.43"
  ],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "ShenZhen", "L": "ShenZhen", "O": "k8s", "OU": "System" }
  ]
}
# Generate the etcd certificate and key
/opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \
  -ca-key=/opt/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd
# Check the generated files
[root@master-01 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1066 Apr 29 18:15 etcd.csr
-rw-r--r-- 1 root root  298 Apr 29 18:14 etcd-csr.json
-rw------- 1 root root 1675 Apr 29 18:15 etcd-key.pem
-rw-r--r-- 1 root root 1440 Apr 29 18:15 etcd.pem
# Copy to the etcd servers
# etcd-1
cp etcd*.pem /etc/kubernetes/ssl/
# etcd-2
scp etcd*.pem root@192.168.1.42:/etc/kubernetes/ssl/
# etcd-3
scp etcd*.pem root@192.168.1.43:/etc/kubernetes/ssl/
# If etcd runs as a non-root user it cannot read the key without this
chmod 644 /etc/kubernetes/ssl/etcd-key.pem

Edit the etcd configuration (with the yum package this is normally /etc/etcd/etcd.conf); adjust the name and IPs per node

#etcd1
# [member]
ETCD_NAME=etcd1
ETCD_DATA_DIR="/var/lib/etcd/etcd1.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.1.41:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.41:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.41:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.41:2380,etcd2=https://192.168.1.42:2380,etcd3=https://192.168.1.43:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.41:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
#etcd2
# [member]
ETCD_NAME=etcd2
ETCD_DATA_DIR="/var/lib/etcd/etcd2.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.1.42:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.42:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.42:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.41:2380,etcd2=https://192.168.1.42:2380,etcd3=https://192.168.1.43:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.42:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
#etcd3
# [member]
ETCD_NAME=etcd3
ETCD_DATA_DIR="/var/lib/etcd/etcd3.etcd"
ETCD_WAL_DIR="/var/lib/etcd/wal"
ETCD_SNAPSHOT_COUNT="100"
ETCD_HEARTBEAT_INTERVAL="100"
ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.1.43:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.1.43:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
#ETCD_CORS=""
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.43:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.41:2380,etcd2=https://192.168.1.42:2380,etcd3=https://192.168.1.43:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.43:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
# [proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
# [security]
ETCD_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_AUTO_TLS="true"
ETCD_PEER_CERT_FILE="/etc/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/kubernetes/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/kubernetes/ssl/ca.pem"
ETCD_PEER_AUTO_TLS="true"
# [logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""

Start etcd
Start the etcd service on every node

systemctl enable etcd
systemctl start etcd
systemctl status etcd
# If it fails, use
# journalctl -f -t etcd and journalctl -u etcd to locate the problem

Verify the etcd cluster status

Check the etcd cluster health

etcdctl --endpoints=https://192.168.1.41:2379,https://192.168.1.42:2379,https://192.168.1.43:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health
member 46af81257a43d032 is healthy: got healthy result from https://192.168.1.43:2379
member d1511678caec8c7e is healthy: got healthy result from https://192.168.1.42:2379
member df617030b25aba3c is healthy: got healthy result from https://192.168.1.41:2379
cluster is healthy

List the etcd cluster members

[root@master-03 ~]# etcdctl --endpoints=https://192.168.1.41:2379,https://192.168.1.42:2379,https://192.168.1.43:2379\
>         --cert-file=/etc/kubernetes/ssl/etcd.pem \
>         --ca-file=/etc/kubernetes/ssl/ca.pem \
>         --key-file=/etc/kubernetes/ssl/etcd-key.pem \
>         member list
46af81257a43d032: name=etcd3 peerURLs=https://192.168.1.43:2380 clientURLs=https://192.168.1.43:2379 isLeader=true
d1511678caec8c7e: name=etcd2 peerURLs=https://192.168.1.42:2380 clientURLs=https://192.168.1.42:2379 isLeader=false
df617030b25aba3c: name=etcd1 peerURLs=https://192.168.1.41:2380 clientURLs=https://192.168.1.41:2379 isLeader=false

Configure the Kubernetes cluster

Install the components (on every node)

# Download the release from GitHub
cd /tmp
wget https://dl.k8s.io/v1.8.8/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/

Create the admin certificate

vi admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "ShenZhen", "L": "ShenZhen", "O": "system:masters", "OU": "System" }
  ]
}
# Generate the admin certificate and private key
cd /opt/ssl/
/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin
# Check the generated files
[root@master-01 ssl]# ls -l admin*
-rw-r--r-- 1 root root 1013 Apr 29 18:36 admin.csr
-rw-r--r-- 1 root root  231 Apr 29 18:35 admin-csr.json
-rw------- 1 root root 1679 Apr 29 18:36 admin-key.pem
-rw-r--r-- 1 root root 1407 Apr 29 18:36 admin.pem
cp admin*.pem /etc/kubernetes/ssl/
scp admin*.pem root@192.168.1.42:/etc/kubernetes/ssl/
scp admin*.pem root@192.168.1.43:/etc/kubernetes/ssl/

Configure the kubectl kubeconfig file
The generated kubeconfig is stored in the /root/.kube directory.

# Configure the kubernetes cluster
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443
# Configure client authentication
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin
kubectl config use-context kubernetes
mkdir ~/.kube/
scp -r ~/.kube/ root@192.168.1.42:~/.kube/
scp -r ~/.kube/ root@192.168.1.43:~/.kube/

Create the kubernetes certificate

cd /opt/ssl
vi kubernetes-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.1.41",
    "192.168.1.42",
    "192.168.1.43",
    "192.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "ShenZhen", "L": "ShenZhen", "O": "k8s", "OU": "System" }
  ]
}
## In the hosts field, 127.0.0.1 is the local host and 192.168.1.41, 192.168.1.42 and 192.168.1.43 are the Master IPs (list every Master you have).
## 192.254.0.1 is the kubernetes SVC IP, normally the first IP of the service network; after the cluster is up you can see it with kubectl get svc.

Generate the kubernetes certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes
# Check the generated files
[root@master-01 ssl]# ls -l kubernetes*
-rw-r--r-- 1 root root 1261 Apr 29 18:51 kubernetes.csr
-rw-r--r-- 1 root root  479 Apr 29 18:50 kubernetes-csr.json
-rw------- 1 root root 1675 Apr 29 18:51 kubernetes-key.pem
-rw-r--r-- 1 root root 1635 Apr 29 18:51 kubernetes.pem
# Copy into place
cp kubernetes*.pem /etc/kubernetes/ssl/
scp kubernetes*.pem root@192.168.1.42:/etc/kubernetes/ssl/
scp kubernetes*.pem root@192.168.1.43:/etc/kubernetes/ssl/
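Before moving on you can verify that every intended IP and DNS name made it into the certificate's SAN list (an optional check, my addition):

openssl x509 -in /etc/kubernetes/ssl/kubernetes.pem -noout -text | grep -A 1 "Subject Alternative Name"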

Configure kube-apiserver
When kubelet starts for the first time it sends a TLS bootstrapping request to kube-apiserver. kube-apiserver checks whether the token in the request matches the one it was configured with and, if so, automatically issues a certificate and key for the kubelet.

# Generate a token
[root@master-01 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
aeaddee30713d7c68ef64deb71c0ac24
# Create the token.csv file
cd /opt/ssl
vi token.csv
aeaddee30713d7c68ef64deb71c0ac24,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
# Copy it
cp token.csv /etc/kubernetes/
scp token.csv root@192.168.1.42:/etc/kubernetes/
scp token.csv root@192.168.1.43:/etc/kubernetes/
# Generate the audit policy file
cd /etc/kubernetes
cat >> audit-policy.yaml <<EOF
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF
# Copy it
scp audit-policy.yaml root@192.168.1.42:/etc/kubernetes/
scp audit-policy.yaml root@192.168.1.43:/etc/kubernetes/

Create the kube-apiserver.service file

# Custom systemd service files normally live under /etc/systemd/system/
# Use each node's own local IP
vi /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --advertise-address=192.168.1.41 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kubernetes/audit.log \
  --authorization-mode=Node,RBAC \
  --bind-address=0.0.0.0 \
  --secure-port=6443 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.1.41:2379,https://192.168.1.42:2379,https://192.168.1.43:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=127.0.0.1 \
  --insecure-port=8080 \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=192.254.0.0/16 \
  --service-node-port-range=30000-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --enable-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
# k8s 1.8 adds --authorization-mode=Node
# k8s 1.8 adds --admission-control=NodeRestriction
# k8s 1.8 adds --audit-policy-file=/etc/kubernetes/audit-policy.yaml
# Pay attention to --service-node-port-range=30000-32000:
# this is the port range used when exposing services externally; randomly assigned NodePorts come from this range, and an explicitly chosen NodePort must also fall inside it.
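As an illustration only (a hypothetical example, not part of this setup), a NodePort Service must pick its nodePort inside that range:

apiVersion: v1
kind: Service
metadata:
  name: demo-nodeport          # hypothetical name
spec:
  type: NodePort
  selector:
    name: nginx                # hypothetical selector
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080            # must be inside --service-node-port-range (30000-32000)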

Start kube-apiserver

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

Configure kube-controller-manager

# Create the kube-controller-manager.service file
vi /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=0.0.0.0 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=192.254.0.0/16 \
  --cluster-cidr=192.233.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

Start kube-controller-manager

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

Configure kube-scheduler

# Create the kube-scheduler.service file
vi /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=0.0.0.0 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

Start kube-scheduler

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

Verify the Master nodes

[root@master-01 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-2               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
[root@master-02 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-1               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
[root@master-03 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-2               Healthy   {"health": "true"}
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"} 

Configure kubelet
On startup kubelet sends a TLS bootstrapping request to kube-apiserver. The kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then does kubelet have permission to create certificate signing requests (certificatesigningrequests).

# Create the role binding first
# The user is the one configured in the master's token.csv file
# This only needs to be done once
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Create the kubelet kubeconfig file

# Configure the cluster
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Configure client authentication
kubectl config set-credentials kubelet-bootstrap \
  --token=aeaddee30713d7c68ef64deb71c0ac24 \
  --kubeconfig=bootstrap.kubeconfig
# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Move the generated bootstrap.kubeconfig file
mv bootstrap.kubeconfig /etc/kubernetes/

Create the kubelet.service file

# Create the kubelet directory
# Use the node's own IP / hostname
mkdir /var/lib/kubelet
vi /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --cgroup-driver=systemd \
  --hostname-override=master-01 \
  --pod-infra-container-image=jicki/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster_dns=192.254.0.2 \
  --cluster_domain=cluster.local. \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --fail-swap-on=false \
  --serialize-image-pulls=false \
  --logtostderr=true \
  --max-pods=512 \
  --v=2
[Install]
WantedBy=multi-user.target
# Notes on the configuration above:
master-01              the local hostname
192.254.0.2            the pre-allocated DNS service address
cluster.local.         the domain of the kubernetes cluster
jicki/pause-amd64:3.0  the pod infrastructure image, i.e. gcr.io/google_containers/pause-amd64:3.0; pulling it once and pushing it to your own registry is faster.
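If you do mirror the pause image into a private registry, the steps look roughly like this (registry.example.com is a placeholder for your own registry, not something defined in this guide):

# Pull the accessible mirror, retag it and push it to a private registry (placeholder address)
docker pull jicki/pause-amd64:3.0
docker tag jicki/pause-amd64:3.0 registry.example.com/google_containers/pause-amd64:3.0
docker push registry.example.com/google_containers/pause-amd64:3.0
# then point kubelet at it via --pod-infra-container-image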

Start kubelet

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
# If it fails, use
# journalctl -f -t kubelet and journalctl -u kubelet to locate the problem

Configure TLS authentication

[root@master-03 kubernetes]# kubectl get csr
NAME                                                  AGE     REQUESTOR          CONDITION
node-csr-9EWT-ZMXLm83nfGkXUWbf5BfAbHT4DoIwiUzKdMxhe8   4m        kubelet-bootstrap   Pending
node-csr-GOarlm9yyjQdX2QglkvG6_T8QmSo2EvEqULxYQY3TFg   14s       kubelet-bootstrap   Pending
node-csr-H2srX4AqkJU-FtOPco1jMJTqrhRnOF3ogLulLVfAYeE   31s       kubelet-bootstrap   Pending
# Approve them
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve

Verify the nodes

[root@master-03 kubernetes]# kubectl get nodes
NAME        STATUS    ROLES     AGE       VERSION
master-01   Ready     <none>    2m        v1.8.8
master-02   Ready     <none>    1m        v1.8.8
master-03   Ready     <none>    15s       v1.8.8
[root@master-03 kubernetes]# ls /etc/kubernetes/kubelet.kubeconfig
/etc/kubernetes/kubelet.kubeconfig
[root@master-03 kubernetes]# ls /etc/kubernetes/ssl/kubelet*
/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.crt  /etc/kubernetes/ssl/kubelet.key

Configure kube-proxy

Create the kube-proxy certificate

cd /opt/ssl
vi kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "ShenZhen", "L": "ShenZhen", "O": "k8s", "OU": "System" }
  ]
}

Generate the kube-proxy certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy
# Check the generated files
ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem
# Copy into place
cp kube-proxy*.pem /etc/kubernetes/ssl/
scp kube-proxy*.pem root@192.168.1.42:/etc/kubernetes/ssl/
scp kube-proxy*.pem root@192.168.1.43:/etc/kubernetes/ssl/

Create the kube-proxy kubeconfig file

# Configure the cluster
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig
# Configure client authentication
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Move it into place
mv kube-proxy.kubeconfig /etc/kubernetes/

Create the kube-proxy.service file

# Create the kube-proxy directory
mkdir -p /var/lib/kube-proxy
vi /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --bind-address=192.168.1.41 \
  --hostname-override=master-01 \
  --cluster-cidr=192.254.0.0/16 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Start kube-proxy

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
# If it fails, use
# journalctl -f -t kube-proxy and journalctl -u kube-proxy to locate the problem

Node side

On a pure Node, the components to deploy are docker, calico, kubectl, kubelet and kube-proxy. The Node fronts the API servers with an Nginx load balancer to get Master HA.

# Apart from the api server, the master components elect a leader through etcd, so nothing extra is needed for them, while the api server is not handled by default. An nginx instance runs on every node and reverse-proxies all api servers; kubelet and kube-proxy on the node connect to the local nginx port, and when nginx detects that a backend is unreachable it drops the failed api server, which gives the api server HA.

Install the components

cd /tmp
wget https://dl.k8s.io/v1.8.8/kubernetes-server-linux-amd64.tar.gz
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
cp -r server/bin/{kube-proxy,kubelet,kubectl} /usr/local/bin/
# On every node
mkdir -p /etc/kubernetes/ssl/
scp ca.pem kube-proxy.pem kube-proxy-key.pem root@192.168.1.44:/etc/kubernetes/ssl/

Configure kubelet and kube-proxy

# kubelet
# First create the kubelet kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Configure client authentication
kubectl config set-credentials kubelet-bootstrap \
  --token=aeaddee30713d7c68ef64deb71c0ac24 \
  --kubeconfig=bootstrap.kubeconfig
# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Move the generated bootstrap.kubeconfig file
mv bootstrap.kubeconfig /etc/kubernetes/
# Create the kube-proxy kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig
# Configure client authentication
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Move it into place
mv kube-proxy.kubeconfig /etc/kubernetes/

Create the Nginx proxy

An Nginx proxy must be created on every node. Note in particular: when a Master also acts as a Node, the Nginx proxy is not needed on it.

# Create the configuration directory
mkdir -p /etc/nginx
# Write the proxy configuration
cat <<EOF>> /etc/nginx/nginx.conf
error_log stderr notice;

worker_processes auto;
events {
  multi_accept on;
  use epoll;
  worker_connections 1024;
}

stream {
    upstream kube_apiserver {
        least_conn;
        server 192.168.1.43:6443;
        server 192.168.1.42:6443;
        server 192.168.1.41:6443;
    }

    server {
        listen        0.0.0.0:6443;
        proxy_pass    kube_apiserver;
        proxy_timeout 10m;
        proxy_connect_timeout 1s;
    }
}
EOF
# Update permissions
chmod +r /etc/nginx/nginx.conf
# Nginx runs as a docker container and is managed through a systemd unit
cat << EOF >> /etc/systemd/system/nginx-proxy.service
[Unit]
Description=kubernetes apiserver docker wrapper
Wants=docker.socket
After=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run -p 127.0.0.1:6443:6443 \\
                              -v /etc/nginx:/etc/nginx \\
                              --name nginx-proxy \\
                              --net=host \\
                              --restart=on-failure:5 \\
                              --memory=512M \\
                              nginx:1.13.5-alpine
ExecStartPre=-/usr/bin/docker rm -f nginx-proxy
ExecStop=/usr/bin/docker stop nginx-proxy
Restart=always
RestartSec=15s
TimeoutStartSec=30s

[Install]
WantedBy=multi-user.target
EOF
# Start Nginx
systemctl daemon-reload
systemctl start nginx-proxy
systemctl enable nginx-proxy
systemctl status nginx-proxy
# Restart the Node's kubelet and kube-proxy
systemctl restart kubelet
systemctl status kubelet
systemctl restart kube-proxy
systemctl status kube-proxy
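A quick sanity check that the local proxy really forwards to an apiserver (my addition, not part of the original guide): any HTTPS response, even a 401/403 from RBAC, confirms the path works.

curl -k https://127.0.0.1:6443/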

Configure TLS authentication on the Master

# Look up the csr name
[root@master-03 ssl]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-9EWT-ZMXLm83nfGkXUWbf5BfAbHT4DoIwiUzKdMxhe8   13h       kubelet-bootstrap   Approved,Issued
node-csr-GOarlm9yyjQdX2QglkvG6_T8QmSo2EvEqULxYQY3TFg   13h       kubelet-bootstrap   Approved,Issued
node-csr-H2srX4AqkJU-FtOPco1jMJTqrhRnOF3ogLulLVfAYeE   13h       kubelet-bootstrap   Approved,Issued
node-csr-mFuz_pMF88dpDjLhpo5oh3_4Y9Wdh1YLZjIGRo5lwZ8   19s       kubelet-bootstrap   Pending
# Approve it
kubectl certificate approve node-csr-mFuz_pMF88dpDjLhpo5oh3_4Y9Wdh1YLZjIGRo5lwZ8
# In general: kubectl certificate approve NAME
[root@master-03 ssl]# kubectl get nodes
NAME        STATUS    ROLES     AGE       VERSION
master-01   Ready     <none>    13h       v1.8.8
master-02   Ready     <none>    13h       v1.8.8
master-03   Ready     <none>    13h       v1.8.8
node-01     Ready     <none>    5m        v1.8.8

Configure the Calico network

Calico official website

# Download the yaml files
wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml
wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/rbac.yaml
# Download the images
# Upstream images (may be blocked)
quay.io/calico/node:v2.6.0
quay.io/calico/cni:v1.11.0
quay.io/calico/kube-controllers:v1.0.0
# Domestic mirrors
jicki/node:v2.6.0
jicki/cni:v1.11.0
jicki/kube-controllers:v1.0.0

docker pull jicki/node:v2.6.0 && docker tag jicki/node:v2.6.0 quay.io/calico/node:v2.6.0
docker pull jicki/cni:v1.11.0 && docker tag jicki/cni:v1.11.0 quay.io/calico/cni:v1.11.0
docker pull jicki/kube-controllers:v1.0.0 && docker tag jicki/kube-controllers:v1.0.0 quay.io/calico/kube-controllers:v1.0.0

Configure calico

vi calico.yaml
# Modify the following entries:
etcd_endpoints: "https://192.168.1.41:2379,https://192.168.1.42:2379,https://192.168.1.43:2379"

etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"

# Fill in the base64-encoded certificates here:
data:
  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')
  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')
  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')

- name: CALICO_IPV4POOL_CIDR
  value: "192.233.0.0/16"
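The parenthesised commands above are meant to be run in a shell and their output pasted into the calico-etcd-secrets Secret; a small helper to print them (my addition):

echo "etcd-key:  $(cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')"
echo "etcd-cert: $(cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')"
echo "etcd-ca:   $(cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')"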

Apply the yaml files

[root@master-01 calico]# pwd
/root/calico
[root@master-01 calico]# ls -l
total 24
-rw-r--r-- 1 root root 17855 Apr 30 08:50 calico.yaml
-rw-r--r-- 1 root root  1131 Apr 26 21:45 rbac.yaml
[root@master-01 calico]# kubectl apply -f .
configmap "calico-config" created
secret "calico-etcd-secrets" created
daemonset "calico-node" created
deployment "calico-kube-controllers" created
deployment "calico-policy-controller" created
serviceaccount "calico-kube-controllers" created
serviceaccount "calico-node" created
clusterrole "calico-kube-controllers" created
clusterrolebinding "calico-kube-controllers" created
clusterrole "calico-node" created
clusterrolebinding "calico-node" created

Check that calico started successfully

[root@master-01 calico]# kubectl get pods -n kube-system
NAME                                      READY     STATUS    RESTARTS   AGE
calico-kube-controllers-565ff95f7-28zr8   1/1       Running   0          7m
calico-node-fggk6                         2/2       Running   0          7m
calico-node-jmpbc                         2/2       Running   0          7m
calico-node-pld72                         2/2       Running   0          7m
calico-node-q5tbz                         2/2       Running   0          7m

Configure kubelet.service

vi /etc/systemd/system/kubelet.service
# Add the following flag to ExecStart:
#   --network-plugin=cni \
# Reload the configuration
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet.service

Install calicoctl

cd /usr/local/bin/
wget -c https://github.com/projectcalico/calicoctl/releases/download/v1.6.1/calicoctl
chmod +x calicoctl

# calicoctl configuration (normally /etc/calico/calicoctl.cfg)
apiVersion: v1
kind: calicoApiConfig
metadata:
spec:
  datastoreType: "etcdv3"
  etcdEndpoints: "https://192.168.1.41:2379,https://192.168.1.42:2379,https://192.168.1.43:2379"
  etcdKeyFile: "/etc/kubernetes/ssl/etcd-key.pem"
  etcdCertFile: "/etc/kubernetes/ssl/etcd.pem"
  etcdCACertFile: "/etc/kubernetes/ssl/ca.pem"

[root@master-01 ~]# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 192.168.1.42 | node-to-node mesh | up    | 01:26:03 | Established |
| 192.168.1.44 | node-to-node mesh | up    | 01:30:00 | Established |
| 192.168.1.43 | node-to-node mesh | up    | 01:30:03 | Established |
+--------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.

Test the cluster

# Create an nginx deployment
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-dm
spec:
  replicas: 3
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    name: nginx
[root@master-01 nginx]# kubectl get pods -owide
NAME                        READY     STATUS    RESTARTS   AGE       IP                NODE
nginx-dm-55b58f68b6-2xzfj   1/1       Running   0          41s       192.168.222.1     master-02
nginx-dm-55b58f68b6-5cfsn   1/1       Running   0          41s       192.168.184.65    master-01
nginx-dm-55b58f68b6-jqwl2   1/1       Running   0          41s       192.168.133.129   master-03
# curl from a node
[root@node-01 kubernetes]# curl 192.168.184.65
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>body {width:35em;margin:0 auto;font-family:Tahoma, Verdana, Arial, sans-serif;}</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p><p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p><p><em>Thank you for using nginx.</em></p>
</body>
</html>

Configure KubeDNS

Official GitHub yaml

Download the images

# Official images
k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10
k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10

Download the yaml file

curl -O https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/kube-dns.yaml.base
# Rename it
mv kube-dns.yaml.base kube-dns.yaml

Modify kube-dns.yaml

1. Change clusterIP: __PILLAR__DNS__SERVER__ to the DNS IP we defined earlier, 192.254.0.2.
2. Change --domain=__PILLAR__DNS__DOMAIN__. to our chosen domain: --domain=cluster.local.
3. Change --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053 to --server=/cluster.local./127.0.0.1#10053
4. Change --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,
5. Change --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,
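The five edits above can also be applied with a single sed pass over the placeholders (a convenience sketch, my addition; it leaves out the explicit trailing dots used in the manual edits, so check the resulting flags before importing):

sed -i 's/__PILLAR__DNS__SERVER__/192.254.0.2/g; s/__PILLAR__DNS__DOMAIN__/cluster.local/g' kube-dns.yaml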
# Import it
[root@master-01 nginx]# kubectl create -f kube-dns.yaml
service "kube-dns" created
serviceaccount "kube-dns" created
configmap "kube-dns" created
deployment "kube-dns" created

Check the kubedns service

[root@master-01 ~]# kubectl get pods -n kube-system -owide
NAME                                      READY     STATUS    RESTARTS   AGE       IP              NODE
calico-kube-controllers-565ff95f7-28zr8   1/1       Running   0          54m       10.0.2.15       node-01
calico-node-fggk6                         2/2       Running   0          54m       192.168.1.41    master-01
calico-node-jmpbc                         2/2       Running   0          54m       10.0.2.15       node-01
calico-node-pld72                         2/2       Running   0          54m       192.168.1.42    master-02
calico-node-q5tbz                         2/2       Running   0          54m       192.168.1.43    master-03
kube-dns-b9b47f6c4-5bb7c                  3/3       Running   0          17m       192.168.190.1   node-01

Verify the DNS service
Before verifying DNS, any pods and deployments created before DNS was deployed must be deleted and recreated, otherwise names will not resolve inside them.

[root@master-01 nginx]# kubectl delete -f  nginx-deployment.yaml
deployment "nginx-dm" deleted
service "nginx-svc" deleted
[root@master-01 nginx]# kubectl create -f  nginx-deployment.yaml
deployment "nginx-dm" created
service "nginx-svc" created
[root@master-01 nginx]# kubectl get pods
NAME                        READY     STATUS    RESTARTS   AGE
nginx-dm-55b58f68b6-6dx8w   1/1       Running   0          5s
nginx-dm-55b58f68b6-lwhk4   1/1       Running   0          5s
nginx-dm-55b58f68b6-xdr4n   1/1       Running   0          5s
[root@master-01 nginx]# kubectl get svc -owide
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE       SELECTOR
kubernetes   ClusterIP   192.254.0.1     <none>        443/TCP   15h       <none>
nginx-svc    ClusterIP   192.254.66.27   <none>        80/TCP    46s       name=nginx
# Create a pod to test DNS
[root@master-01 nginx]# vi test-dns.yaml
apiVersion: v1
kind: Pod
metadata:
  name: alpine
spec:
  containers:
  - name: alpine
    image: alpine
    command:
    - sh
    - -c
    - while true; do sleep 1; done
[root@master-01 nginx]# kubectl create -f test-dns.yaml
pod "alpine" created
[root@master-01 nginx]# kubectl get pods -owide
NAME                        READY     STATUS    RESTARTS   AGE       IP                NODE
alpine                      1/1       Running   0          9s        192.168.222.3     master-02
nginx-dm-55b58f68b6-6dx8w   1/1       Running   0          2m        192.168.184.66    master-01
nginx-dm-55b58f68b6-lwhk4   1/1       Running   0          2m        192.168.222.2     master-02
nginx-dm-55b58f68b6-xdr4n   1/1       Running   0          2m        192.168.133.130   master-03
# Test
[root@master-01 nginx]# kubectl exec -it alpine nslookup nginx-svc
nslookup: can't resolve '(null)': Name does not resolve

Name:      nginx-svc
Address 1: 192.254.66.27 nginx-svc.default.svc.cluster.local

Deploy Ingress and Dashboard

Official dashboard GitHub

Download the dashboard image

k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3

Download the yaml files

All of the yaml files are under the path https://github.com/kubernetes/kubernetes/blob/v1.8.8/cluster/addons/dashboard/dashboard-secret.yaml

Apply the yaml

# Apply the files
[root@master-01 dashboard]# kubectl apply -f .
configmap "kubernetes-dashboard-settings" created
serviceaccount "kubernetes-dashboard" created
deployment "kubernetes-dashboard" created
role "kubernetes-dashboard-minimal" created
rolebinding "kubernetes-dashboard-minimal" created
secret "kubernetes-dashboard-certs" created
service "kubernetes-dashboard" created
# Check the svc and pods
[root@master-01 dashboard]# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   192.254.0.2       <none>        53/UDP,53/TCP   1h
kubernetes-dashboard   ClusterIP   192.254.235.241   <none>        443/TCP         3m
[root@master-01 dashboard]# kubectl get pods -n kube-system -owide
NAME                                      READY     STATUS    RESTARTS   AGE       IP               NODE
calico-kube-controllers-565ff95f7-28zr8   1/1       Running   0          2h        10.0.2.15        node-01
calico-node-fggk6                         2/2       Running   0          2h        192.168.1.41     master-01
calico-node-jmpbc                         2/2       Running   0          2h        10.0.2.15        node-01
calico-node-pld72                         2/2       Running   0          2h        192.168.1.42     master-02
calico-node-q5tbz                         2/2       Running   0          2h        192.168.1.43     master-03
kube-dns-b9b47f6c4-5bb7c                  3/3       Running   0          1h        192.168.190.1    node-01
kubernetes-dashboard-768854d6dc-zb9rc     1/1       Running   2          2m        192.168.184.71   master-01

For how to access the dashboard, see: Accessing-Dashboard
I access it through a NodePort.

[root@master-01 dashboard]# kubectl -n kube-system edit service kubernetes-dashboard
service "kubernetes-dashboard" edited
[root@master-01 dashboard]# kubectl -n kube-system get service kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP        EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   192.254.235.241   <none>        443:31545/TCP   11m
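Instead of editing the Service interactively, the same change can be made non-interactively (an equivalent sketch, my addition):

kubectl -n kube-system patch service kubernetes-dashboard -p '{"spec": {"type": "NodePort"}}'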

Then just browse to the master IP on port 31545.

If a permission problem appears at this point, it is because no token or kubeconfig was supplied, so there is no permission to access anything. It is an RBAC issue; the fix is to log in with the dashboard's token (which needs to be base64-decoded):

[root@master-01 ~]# kubectl get secret --all-namespaces
NAMESPACE       NAME                                       TYPE                                  DATA      AGE
default         default-token-k6hqr                        kubernetes.io/service-account-token   3         22h
ingress-nginx   default-token-9v48d                        kubernetes.io/service-account-token   3         5h
ingress-nginx   nginx-ingress-serviceaccount-token-lbhxx   kubernetes.io/service-account-token   3         5h
kube-public     default-token-zgtrk                        kubernetes.io/service-account-token   3         22h
kube-system     calico-etcd-secrets                        Opaque                                3         8h
kube-system     calico-kube-controllers-token-d46bn        kubernetes.io/service-account-token   3         8h
kube-system     calico-node-token-6s4hv                    kubernetes.io/service-account-token   3         8h
kube-system     dashboard-token-g55wc                      kubernetes.io/service-account-token   3         6h
kube-system     default-token-xbw22                        kubernetes.io/service-account-token   3         22h
kube-system     heapster-token-wt55t                       kubernetes.io/service-account-token   3         25m
kube-system     kube-dns-token-xbw5c                       kubernetes.io/service-account-token   3         7h
kube-system     kubernetes-dashboard-certs                 Opaque                                0         5h
kube-system     kubernetes-dashboard-key-holder            Opaque                                2         5h
kube-system     kubernetes-dashboard-token-x8hvz           kubernetes.io/service-account-token   3         5h
[root@master-01 ~]# kubectl get secret -n kube-system dashboard-token-g55wc -oyaml
apiVersion: v1
data:
  ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR3akNDQXFxZ0F3SUJBZ0lVTXdzMHJpRi9WYzlKODhzWFNCV3NkN1JTQkNzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0ZOb1pXNWFhR1Z1TVJFd0R3WURWUVFIRXdoVAphR1Z1V21obGJqRU1NQW9HQTFVRUNoTURhemh6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RXpBUkJnTlZCQU1UCkNtdDFZbVZ5Ym1WMFpYTXdIaGNOTVRnd05ESTVNVEF3TWpBd1doY05Nak13TkRJNE1UQXdNakF3V2pCbk1Rc3cKQ1FZRFZRUUdFd0pEVGpFUk1BOEdBMVVFQ0JNSVUyaGxibHBvWlc0eEVUQVBCZ05WQkFjVENGTm9aVzVhYUdWdQpNUXd3Q2dZRFZRUUtFd05yT0hNeER6QU5CZ05WQkFzVEJsTjVjM1JsYlRFVE1CRUdBMVVFQXhNS2EzVmlaWEp1ClpYUmxjekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNUHVuWDIvdk4wUWJQdzcKNGo3cDBXNW5GdENRTmlxTkVENDZEbXpCNFA0SVRQRUk2Q1pyczFPTnlJWVhWUUN0djN2UG82d0N0cVFOeU5wbgpMcGg0M2p3YkprRHdEbnhKVEFzb1ZVeXU3Ym5YRVVTOGpiQ2I3cCtzNEFzVFQxQjNLWHdPMURFREFZRkFJZWtYClZMZWdVUUZhVVJyRndYMHJ4cGI0QlBLN04zbzQzK281VmhDa0ZOQTU3VjRFUytFNEZvcGQ2ekFEYTBreFo1QXgKanNLd2s3a20rUnV6NkRNeWplbnl5dXVtZzAxWGRnMU9PaXFpbjczWGs3NWFSZDF1aDZEbmdWSnlRQW9qSDRrawpHUmtqM1NsZGVYSFR2L0xPNmEvU1NiNW1Tczd1alJvUHZURmZUbk0ybWFXRmZCNytnVHZMcGxRRmJKMVdBelJRCklTbW5NNWNDQXdFQUFhTm1NR1F3RGdZRFZSMFBBUUgvQkFRREFnRUdNQklHQTFVZEV3RUIvd1FJTUFZQkFmOEMKQVFJd0hRWURWUjBPQkJZRUZHajFieVJKQlFJbDdRcWZQSjF6UVlrZENlMndNQjhHQTFVZEl3UVlNQmFBRkdqMQpieVJKQlFJbDdRcWZQSjF6UVlrZENlMndNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUFGL0Z5RHhiTmNZcTVMCjY3cTZValI2aE5NNHVpbVVVVVBwUkhhU2VUWmNDNlVGdndQeU5ZNzlmQThPSjJrVFViM0d3bmN6UjJ4bzNab1YKYitXNnBKUWI5TjVnRDV6ekJWelpHN05CQWVoL1pvc2dEZXczdjFpZUdmU0N6a0ZCWFJQRUVVRUlPYkphYWxFVwpRZThuOWJXSHNqNGI3L2VZVVhaMWVqRU1Wd2kvMTVubGNDT2dIZVpYWHNvQ0RmdmVBbUxhWm1LRVhyOHJhOU9OCk5STWRwcGg4eTJidmxUYnpvVGRoV3pRWjY0MUcySExYS2wzMXpVSzhEYnQ0THlpaEsxcjVpRHVRaEdRMENtTzcKVlVvdktsaVlmWURQWFVsdnBWZUN3S2grVWpNSDNIYjdtcWk3akJ2OTdnckxiQmh1UXlhaC9wK004ZHFRcDQ2LwpEMVlHNGRWOAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  namespace: a3ViZS1zeXN0ZW0=
  token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbExYTjVjM1JsYlNJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKa1lYTm9ZbTloY21RdGRHOXJaVzR0WnpVMWQyTWlMQ0pyZFdKbGNtNWxkR1Z6TG1sdkwzTmxjblpwWTJWaFkyTnZkVzUwTDNObGNuWnBZMlV0WVdOamIzVnVkQzV1WVcxbElqb2laR0Z6YUdKdllYSmtJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpYSjJhV05sTFdGalkyOTFiblF1ZFdsa0lqb2lOek15TlRFNE9ESXROR015TmkweE1XVTRMVGhrWm1JdE1EZ3dNREkzWmpWaE9EWXpJaXdpYzNWaUlqb2ljM2x6ZEdWdE9uTmxjblpwWTJWaFkyTnZkVzUwT210MVltVXRjM2x6ZEdWdE9tUmhjMmhpYjJGeVpDSjkuSjRYMHR4REVSZUxQNmZ4eFlKT2VJeUJucDRla0FSd3ZKc0ZFbXBxSFZtaEhKWUZadzVRcU5iY1k5ZEU0Y0kya1NqUmhFSFpTTzR5RHVfOGRwSzBzUlgtMjRpWmE0N05IVXVGZU0xTDl6cTZ5ODVkYXlTelg1N05BTVBpYng0MHNleE5BZ2VIZUNESmxQWThiQVhCTG90TGk5WmdRS2FNQldFOFJ2Y0pja1A5VlliVndCSmhtYVNxcXVXaHlwWXVVMmpiZWowVTQ0VmlUNy1xaUpSMTJQUUsycVgySVBxNVlOTmVpcElUdEFvaDVpZUstRmtJenVnMjRSYmxNTTBQYjY4S3VSUFFNbi1mcDk3cEZvS3lKWVhFc1NWQ1FWV2FlYlAtaGpzYlVvWm9iTWhMWWJQUFVWY0Z0cmltWExWS2lhZTRMNjZsRExiOUg5SUQ2ZzNocklB
kind: Secret
metadata:
  annotations:
    kubernetes.io/service-account.name: dashboard
    kubernetes.io/service-account.uid: 73251882-4c26-11e8-8dfb-080027f5a863
  creationTimestamp: 2018-04-30T03:27:45Z
  name: dashboard-token-g55wc
  namespace: kube-system
  resourceVersion: "76660"
  selfLink: /api/v1/namespaces/kube-system/secrets/dashboard-token-g55wc
  uid: 732bdb8a-4c26-11e8-8dfb-080027f5a863
type: kubernetes.io/service-account-token
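To pull out the decoded token directly instead of reading the yaml by eye, a jsonpath query works (my addition; the secret name is the one shown above):

kubectl -n kube-system get secret dashboard-token-g55wc -o jsonpath='{.data.token}' | base64 -d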


Deploy the heapster add-on
Download the installation files

wget https://github.com/kubernetes/heapster/archive/v1.5.0.tar.gz
tar xzvf ./v1.5.0.tar.gz
cd ./heapster-1.5.0/
kubectl create -f deploy/kube-config/influxdb/
kubectl create -f deploy/kube-config/rbac/heapster-rbac.yaml

Access grafana
Access it through kubectl proxy

 kubectl proxy --address='192.168.1.43' --port=8086 --accept-hosts='^*$'
Starting to serve on 192.168.1.43:8086
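With the proxy running, grafana should then be reachable through the API service-proxy path; assuming the influxdb manifests created the usual monitoring-grafana service in kube-system (my assumption about the service name), the URL looks like:

http://192.168.1.43:8086/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy/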

Deploy Nginx Ingress
Kubernetes currently exposes services in only three ways: LoadBalancer Service, NodePort Service and Ingress. What is Ingress? Ingress exposes Kubernetes services through load balancers such as Nginx or HAProxy.

Official Nginx Ingress GitHub

Configure node scheduling

# There are several ways to run ingress:
#   1. deployment - freely scheduled, controlled by replicas
#   2. daemonset  - globally scheduled onto every node
# With the deployment approach we want to constrain the controller to specific nodes, so those nodes need a label
# The defaults look like this:
[root@master-01 dashboard]# kubectl get nodes --show-labels
NAME        STATUS    ROLES     AGE       VERSION   LABELS
master-01   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master-01
master-02   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master-02
master-03   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master-03
node-01     Ready     <none>    2h        v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=node-01
# Label .41 and .42
[root@master-01 dashboard]# kubectl label nodes master-01 ingress=proxy
node "master-01" labeled
[root@master-01 dashboard]# kubectl label nodes master-02 ingress=proxy
node "master-02" labeled
[root@master-01 dashboard]# kubectl get nodes --show-labels
NAME        STATUS    ROLES     AGE       VERSION   LABELS
master-01   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=master-01
master-02   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=master-02
master-03   Ready     <none>    16h       v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master-03
node-01     Ready     <none>    2h        v1.8.8    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=node-01
# Deploy the Nginx backend; the default backend serves a fixed page for any domain that has no rule.
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml
[root@master-01 ingress]# kubectl create -f namespace.yaml -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created
[root@master-01 ingress]# kubectl get deployment -n ingress-nginx
NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
default-http-backend   1         1         1            1           4m
[root@master-01 ingress]# kubectl get pods -n ingress-nginx -owide
NAME                                    READY     STATUS    RESTARTS   AGE       IP              NODE
default-http-backend-66b447d9cf-mmxrl   1/1       Running   0          3m        192.168.222.8   master-02
# Deploy the Ingress RBAC resources
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml
# Deploy the Ingress Controller component
# Download the yaml file
curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml
# Two nodes were labelled above, so set replicas: 2
# Edit the yaml file: add the service account, hostNetwork and a nodeSelector under the second spec:
spec:
  replicas: 2
  ....
  spec:
    hostNetwork: true
    serviceAccountName: nginx-ingress-serviceaccount
    nodeSelector:
      ingress: proxy
  ....

kubectl create -f with-rbac.yaml
[root@master-01 ingress]# kubectl get pods -n ingress-nginx -owide
NAME                                       READY   STATUS  RESTARTS AGE     IP              NODE
default-http-backend-66b447d9cf-mmxrl       1/1       Running   0          29m       192.168.222.8    master-02
nginx-ingress-controller-6c995c59d9-dsmfw   1/1       Running   0          1m        192.168.222.10   master-02
nginx-ingress-controller-6c995c59d9-rdvf7   1/1       Running   0          1m        192.168.184.72   master-01
# Check the existing svc
[root@master-01 ingress]# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP        EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   192.254.0.2       <none>        53/UDP,53/TCP   2h
kubernetes-dashboard   NodePort    192.254.235.241   <none>        443:31545/TCP   53m
# Create the yaml file
vi dashboard-ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: dashboard-ingress
  namespace: kube-system
spec:
  rules:
  - host: dashboard.jicki.me
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 80

# Apply the yaml
[root@master-01 ingress]# kubectl apply -f dashboard-ingress.yaml
ingress "dashboard-ingress" created
[root@master-01 ingress]# kubectl get ingress -n kube-system -o wide
NAME                HOSTS                ADDRESS   PORTS     AGE
dashboard-ingress   dashboard.jicki.me             80        24s
# Verify
curl -I dashboard.jicki.me
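For the curl test to resolve, dashboard.jicki.me must point at one of the labelled ingress nodes; if there is no DNS record, a temporary hosts entry does the job (a sketch using master-01's IP, my addition):

echo "192.168.1.41 dashboard.jicki.me" >> /etc/hosts
curl -I dashboard.jicki.me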

END
Reference:
kubernetes-1.8.3
