1. Deploy etcd

Install etcd with yum:

[root@3a9e34429b75 /]# yum install etcd -y

Enable it to start at boot:

[root@3a9e34429b75 /]# systemctl enable etcd
Created symlink /etc/systemd/system/multi-user.target.wants/etcd.service, pointing to /usr/lib/systemd/system/etcd.service.

Check which ports are currently listening:

[root@3a9e34429b75 /]# netstat -lnpt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    

Start etcd:

[root@3a9e34429b75 /]# systemctl start etcd

etcd's two default ports (2379 for clients, 2380 for peers) are now listening:

[root@94805217bd96 /]# netstat -lnpt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      -
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      -        

Verify etcd:

[root@94805217bd96 /]# etcdctl set name mayun
mayun
[root@94805217bd96 /]# etcdctl get name
mayun
[root@94805217bd96 /]# etcdctl -C http://127.0.0.1:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://localhost:2379
cluster is healthy
[root@94805217bd96 /]# 
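etcd can also be checked over its HTTP API, which is handy for scripting; a minimal sketch, assuming the v2 endpoints served by the yum-packaged etcd on the default client port:

# a healthy member answers {"health": "true"}
curl http://127.0.0.1:2379/health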

etcd configuration, /etc/etcd/etcd.conf:

# [member]
ETCD_NAME=master
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#ETCD_SNAPSHOT_COUNT="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://localhost:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_STRICT_RECONFIG_CHECK="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""

When the configuration is done, save the file and exit.
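etcd only reads /etc/etcd/etcd.conf at startup, so restart it and confirm that the client port is now bound to all interfaces rather than just 127.0.0.1:

systemctl restart etcd
# 2379 should now show 0.0.0.0:2379 instead of 127.0.0.1:2379
netstat -lnpt | grep 2379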

2. Install Certificates

This article generates its own certificates with the make-ca-cert script from GitHub, as follows.

First, clone the kubernetes source from GitHub onto the machine that will become the master.

# git clone https://github.com/kubernetes/kubernetes

Edit make-ca-cert.sh and change line 30 so the certificate group is kube (the services will run under the kube group):

# Update the line below with the group that exists on the Kubernetes master,
# i.e. the user group the kubernetes services will run under.
cert_group=${CERT_GROUP:-kube}
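The kube group is normally created by the kubernetes RPMs, so if you generate the certificates before installing them, the group may not exist yet; a hypothetical pre-step:

# create a system group named kube unless it already exists
getent group kube >/dev/null || groupadd -r kube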

Run make-ca-cert.sh:

# cd kubernetes/cluster/saltbase/salt/generate-cert/
./make-ca-cert.sh 10.20.0.1

Here 10.20.0.1 is the IP of the master node. When the script finishes, the relevant keys have all been generated under /srv/kubernetes; copy these keys to the same directory on every minion node.
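For example, copying to the two minion nodes used later in this article; a sketch, adjust the IPs and paths to your environment:

# push the generated certificates to each minion
for node in 10.3.14.10 10.3.14.11; do
    ssh root@$node "mkdir -p /srv/kubernetes"
    scp /srv/kubernetes/* root@$node:/srv/kubernetes/
done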

The certificates generated here are the ones referenced in the k8s master configuration below.

3. Deploy the k8s Master

Install kubernetes:

[root@44dd34165e5e /]# yum install kubernetes -y

Configure the master. The following components need to run on the Kubernetes master:

Kubernetes API Server, Kubernetes Controller Manager, Kubernetes Scheduler

Configure /etc/kubernetes/apiserver:

###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

# The port on the local server to listen on.
# KUBE_API_PORT="--port=8080"

# Port minions listen on
# KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://10.3.14.8:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.20.0.0/16"

# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"

# Add your own!
KUBE_API_ARGS="--secure-port=443 --client-ca-file=/srv/kubernetes/ca.crt --tls-cert-file=/srv/kubernetes/server.cert --tls-private-key-file=/srv/kubernetes/server.key"
#KUBE_API_ARGS=""

Configure /etc/kubernetes/config:

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://10.3.14.8:8080"

Configure /etc/kubernetes/controller-manager:

###
# The following values are used to configure the kubernetes controller-manager

# defaults from config and apiserver should be adequate

# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--root-ca-file=/srv/kubernetes/ca.crt --service-account-private-key-file=/srv/kubernetes/server.key"
#KUBE_CONTROLLER_MANAGER_ARGS=""

Enable the services at boot and start them:

[root@44dd34165e5e /]# systemctl enable kube-apiserver

[root@44dd34165e5e /]# systemctl start kube-apiserver

[root@44dd34165e5e /]# systemctl enable kube-controller-manager

[root@44dd34165e5e /]# systemctl start kube-controller-manager

[root@44dd34165e5e /]# systemctl enable kube-scheduler

[root@44dd34165e5e /]# systemctl start kube-scheduler
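With all three services running, the master can be sanity-checked in one command; kubectl talks to the local insecure port by default, and the controller-manager, scheduler, and etcd member should all report Healthy:

[root@44dd34165e5e /]# kubectl get componentstatuses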

4. Install the Nodes

The install command is the same as on the master:

yum install kubernetes

Strictly speaking, the master and nodes could each install only the packages they need; to save effort I simply installed everything everywhere, then configure and start only what each role requires.

A node runs two main components: kubelet and kube-proxy.

vi /etc/kubernetes/config

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://10.3.14.8:8080"

vi /etc/kubernetes/kubelet

###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=10.3.14.10"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://10.3.14.8:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS="--cluster_dns=10.20.10.10 --cluster_domain=cluster.local"

Enable the services at boot and start them:

systemctl enable kubelet
systemctl start kubelet
systemctl enable kube-proxy
systemctl start kube-proxy

Check that they are running:

[root@iz2ze0fq2isg8vphkpos5sz ~]# netstat -lnpt
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      2021/sshd
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      25448/kubelet
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      25813/kube-proxy
tcp6       0      0 :::10250                :::*                    LISTEN      25448/kubelet
tcp6       0      0 :::10255                :::*                    LISTEN      25448/kubelet
tcp6       0      0 :::4194                 :::*                    LISTEN      25448/kubelet 

Note: due to server resource constraints, the master and etcd that were originally on 10.3.14.8 were later migrated to 172.20.4.132. If you notice that some IPs later on don't match the earlier ones, that's why. ^_^

5. Install flannel

flannel provides pod network allocation and inter-pod communication within the cluster.

Install it:

yum install flannel

Configure /etc/sysconfig/flanneld:

# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://10.3.14.8:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/k8s.com/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

As the configuration shows, flannel obtains its network configuration by talking to etcd.

etcd therefore needs to be configured as well: the value of FLANNEL_ETCD_PREFIX is the etcd key that flannel reads.


[root@iz2ze0fq2isg8vphkpos5rz /]# etcdctl set /k8s.com/network/config '{ "Network": "172.17.0.0/16" }'
{ "Network": "172.17.0.0/16" }
[root@iz2ze0fq2isg8vphkpos5rz /]# etcdctl get /k8s.com/network/config
{ "Network": "172.17.0.0/16" }
[root@iz2ze0fq2isg8vphkpos5rz /]# 

The node network configuration also needs to be uploaded to etcd.

First, write config.json:

[root@iz2ze0fq2isg8vphkpos5sz k8s]# vi config.json
{
  "Network": "172.17.0.0/16",
  "SubnetLen": 24,
  "Backend": {
    "Type": "vxlan",
    "VNI": 7890
  }
}

Upload it:

[root@iz2ze0fq2isg8vphkpos5sz k8s]# curl -L http://172.20.4.132:2379/v2/keys/k8s.com/network/config -XPUT --data-urlencode value@config.json
{"action":"set","node":{"key":"/k8s.com/network/config","value":"{\n\"Network\": \"172.17.0.0/16\",\n\"SubnetLen\": 24,\n\"Backend\": {\n    \"Type\": \"vxlan\",\n     \"VNI\": 7890\n     }\n }\n","modifiedIndex":69677,"createdIndex":69677},"prevNode":{"key":"/k8s.com/network/config"
,"value":"{\"Network\":\"172.17.0.0/16\"}","modifiedIndex":68840,"createdIndex":68840}}
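The key can be read back through the same v2 keys endpoint to confirm the value was stored as expected:

# the value field should contain the JSON written in config.json
curl -s http://172.20.4.132:2379/v2/keys/k8s.com/network/config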

Enable at boot and start:

systemctl enable flanneld
systemctl start flanneld
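One easily missed step: docker must be restarted after flanneld comes up so that docker0 is recreated inside the flannel-assigned subnet. On the CentOS packages, flanneld's systemd integration exports the subnet through files under /run/flannel/, which the docker unit picks up:

# restart docker so it allocates container IPs from the flannel subnet
systemctl restart docker
# the subnet and MTU assigned to this node are recorded here
cat /run/flannel/subnet.env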

Check the local IP configuration; the flannel network interface has appeared:

[root@iz2ze0fq2isg8vphkpos5uz ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1450
        inet 172.17.8.1  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:37:12:ef:68  txqueuelen 0  (Ethernet)
        RX packets 2125904  bytes 331673249 (316.3 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4013911  bytes 464273110 (442.7 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.3.14.10  netmask 255.255.255.0  broadcast 10.3.14.255
        ether 00:16:3e:06:1d:eb  txqueuelen 1000  (Ethernet)
        RX packets 5319293  bytes 1706527445 (1.5 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3124713  bytes 472762966 (450.8 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel.7890: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.8.0  netmask 255.255.255.255  broadcast 0.0.0.0
        ether d2:a2:e8:fd:f2:00  txqueuelen 0  (Ethernet)
        RX packets 2776  bytes 282277 (275.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 2551  bytes 376099 (367.2 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 18  bytes 976 (976.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 18  bytes 976 (976.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

I set up two nodes here, .10 and .11.
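To confirm the overlay actually forwards traffic between them, ping from one node to the other's docker0 address; a sketch with a hypothetical address, read the real one from ifconfig on the target node:

# from node .10, ping node .11's docker0 (address is hypothetical)
ping -c 3 172.17.60.1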

The managed nodes can be listed from the k8s master:

[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get node  -s 10.3.14.8:8080
NAME         STATUS    AGE
10.3.14.10   Ready     14s
10.3.14.11   Ready     2m

6. Install the Dashboard

Check the current deployments and svcs:

[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get deployment -s 10.3.14.8:8080 --all-namespaces
No resources found.
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get svc -s 10.3.14.8:8080  --all-namespaces
NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   10.20.0.1    <none>        443/TCP   17h

The dashboard deployment:

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  # Keep the name in sync with image version and
  # gce/coreos/kube-manifests/addons/dashboard counterparts
  name: kubernetes-dashboard-latest
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        version: latest
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64:v1.4.2
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
        - --apiserver-host=http://10.3.14.8:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30

The dashboard service:

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

Create the deployment and pod:

[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get svc --all-namespaces
NAMESPACE     NAME                   CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
default       kubernetes             10.20.0.1      <none>        443/TCP   5m
kube-system   kubernetes-dashboard   10.20.87.235   <none>        80/TCP    2m
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# 
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl create -f dashboard.yaml
deployment "kubernetes-dashboard-latest" created
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                           READY     STATUS    RESTARTS   AGE
kube-system   kubernetes-dashboard-latest-2600502794-n48m3   1/1       Running   0          15s
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# 

Create the service:

[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl create -f dashboardsvc.yaml
service "kubernetes-dashboard" created
[root@iz2ze0fq2isg8vphkpos5rz dashboard]# kubectl get svc --all-namespaces
NAMESPACE     NAME                   CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
default       kubernetes             10.20.0.1      <none>        443/TCP   2m
kube-system   kubernetes-dashboard   10.20.87.235   <none>        80/TCP    9s

Now open http://10.3.14.8:8080/ui in a browser.
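The same endpoint can be checked from the shell; in 1.5 the /ui path answers with a redirect to the dashboard service proxy in kube-system:

# expect an HTTP redirect toward the kubernetes-dashboard proxy
curl -sI http://10.3.14.8:8080/ui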

7. Install DNS

The DNS ConfigMap, kubedns-cm.yaml:

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists

The DNS ServiceAccount, kubedns-sa.yaml:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

The DNS Deployment, kubedns-deployment.yaml:

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubedns
        image: myhub.fdccloud.com/library/kubedns-amd64:1.9
        volumeMounts:
        - name: tz-config
          mountPath: /etc/localtime
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-map=kube-dns
        - --kube-master-url=http://172.20.4.132:8080
        # This should be set to v=2 only after the new image (cut from 1.5) has
        # been released, otherwise we will flood the logs.
        - --v=0
        #__PILLAR__FEDERATIONS__DOMAIN__MAP__
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
      - name: dnsmasq
        image: myhub.fdccloud.com/library/kube-dnsmasq-amd64:1.4
        volumeMounts:
        - name: tz-config
          mountPath: /etc/localtime
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        #- --log-facility=-    # this line is commented out
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 10Mi
      - name: dnsmasq-metrics
        image: myhub.fdccloud.com/library/dnsmasq-metrics-amd64:1.0
        volumeMounts:
        - name: tz-config
          mountPath: /etc/localtime
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 10Mi
      - name: healthz
        image: myhub.fdccloud.com/library/exechealthz-amd64:1.2
        volumeMounts:
        - name: tz-config
          mountPath: /etc/localtime
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
      dnsPolicy: Default  # Don't use cluster DNS.
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai

The DNS Service, kubedns-svc.yaml:

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.20.10.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

kubectl create -f kubedns-cm.yaml
kubectl create -f kubedns-sa.yaml
kubectl create -f kubedns-deployment.yaml
kubectl create -f kubedns-svc.yaml

Check the deployment, pods, and svcs:

[root@yzb-centos72-3 dns]# kubectl get deployment --all-namespaces
NAMESPACE     NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   kube-dns                      1         1         1            1           19m
kube-system   kubernetes-dashboard-latest   1         1         1            1           19h
nginx-ns      nginx-depl                    1         1         1            1           32m
[root@yzb-centos72-3 dns]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                           READY     STATUS    RESTARTS   AGE
default       busybox                                        1/1       Running   1          1h
kube-system   kube-dns-4025732692-4rs8d                      4/4       Running   0          19m
kube-system   kubernetes-dashboard-latest-3685189279-5j204   1/1       Running   2          19h
nginx-ns      nginx-depl-3484537133-4g31s                    1/1       Running   0          33m
[root@yzb-centos72-3 dns]# kubectl get svc --all-namespaces
NAMESPACE     NAME                   CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
default       kubernetes             10.20.0.1       <none>        443/TCP         19h
kube-system   kube-dns               10.20.10.10     <none>        53/UDP,53/TCP   1h
kube-system   kubernetes-dashboard   10.20.64.165    <none>        80/TCP          19h
nginx-ns      nginx-svc              10.20.202.116   <none>        6612/TCP        59m

A test pod, busybox.yaml:

apiVersion: v1
kind: Pod
metadata:
  labels:
    name: busybox
    role: master
  name: busybox
spec:
  containers:
  - name: busybox
    image: myhub.fdccloud.com/library/busybox
    command:
    - sleep
    - "3600"

Create the pod:

kubectl create -f busybox.yaml

Check the pod:

[root@yzb-centos72-3 dns]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                           READY     STATUS    RESTARTS   AGE
default       busybox                                        1/1       Running   1          1h
kube-system   kube-dns-4025732692-4rs8d                      4/4       Running   0          23m
kube-system   kubernetes-dashboard-latest-3685189279-5j204   1/1       Running   2          19h
nginx-ns      nginx-depl-3484537133-4g31s                    1/1       Running   0          36

Test DNS resolution:

[root@yzb-centos72-3 dns]# kubectl exec -it busybox -- nslookup kubernetes
Server:    10.20.10.10
Address 1: 10.20.10.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.20.0.1 kubernetes.default.svc.cluster.local
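Any other service should resolve the same way, as <service>.<namespace>; for example, the nginx-svc service that appears in the listings above:

[root@yzb-centos72-3 dns]# kubectl exec -it busybox -- nslookup nginx-svc.nginx-ns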

The related yaml files can be downloaded from:

http://download.csdn.net/detail/stonexmx/9880990
