您的位置:首页 > 运维架构 > Docker

二进制kubernetes+docker

2020-08-30 15:23 986 查看

kubernetes,简称K8s,是用8代替8个字符“ubernete”而成的缩写。是一个开源的,用于管理云平台中多个主机上的容器化的应用,Kubernetes的目标是让部署容器化的应用简单并且高效(powerful),Kubernetes提供了应用部署,规划,更新,维护的一种机制


实验环境

centos7.2_x64


实验软件

k8s_server 192.168.10.15 k8s1 

k8s_node   192.168.10.11 k8s2 

kubernetes-server-linux-amd64.tar.gz

kubernetes-node-linux-amd64.tar.gz


k8s1

     docker 

     etcd:2379

     kube-apiserver:8080

     kube-controller-manager:8080

     kube-scheduler:8080

k8s2

    docker

    etcd:2379

    kubelet:10250

    kube-proxy:10249


软件安装

systemctl daemon-reload

systemctl restart ntpd && systemctl enable ntpd

systemctl stop firewalld && systemctl disable firewalld

systemctl stop NetworkManager  && systemctl disable NetworkManager

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux

ntpdate 192.168.10.15 && hwclock -w

hostnamectl set-hostname k8s1 

cp -pv /etc/hosts /etc/hosts.bak

echo 192.168.10.15 k8s1 >> /etc/hosts

echo 192.168.10.11 k8s2 >> /etc/hosts

rsync -avz /etc/sysconfig/selinux root@192.168.10.11:/etc/sysconfig/selinux

rsync -avz /etc/hosts root@192.168.10.11:/etc/hosts


yum install -y yum-utils device-mapper-persistent-data lvm2 bridge-utils device-mapper docker-ce

systemctl daemon-reload

systemctl restart docker && systemctl enable docker


k8s1安装etcd存储

yum install -y etcd

cp -pv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bak

cat /etc/etcd/etcd.conf

ETCD_NAME="default"

ETCD_DATA_DIR="/var/lib/etcd/default"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379

systemctl start etcd && systemctl enable etcd

etcd --version 

etcd Version: 3.3.11


etcdctl cluster-health

member 8e9e05c52164694d is healthy: got healthy result from http://0.0.0.0:2379

cluster is healthy


tar zxvf /root/kubernetes-server-linux-amd64.tar.gz

mv /root/kubernetes /usr/local/kuber

mkdir -pv /usr/local/kuber/{ssl,bin,cfg}

mv /usr/local/kuber/server/bin/{kube-apiserver,kube-scheduler,kube-controller-manager,kubectl} /usr/local/kuber/bin/

ln -s /usr/local/kuber/bin/* /bin/


k8s1安装kube-apiserver

touch /usr/local/kuber/cfg/kube-apiserver

cat /usr/local/kuber/cfg/kube-apiserver 

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.10.15:2379"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_API_PORT="--insecure-port=8080"

KUBE_ADVERTISE_ADDR="--advertise-address=192.168.10.15"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=192.168.11.0/24"


cat /usr/local/kuber/cfg/kube-apiserver 

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.10.15:2379"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_API_PORT="--insecure-port=8080"

KUBE_ADVERTISE_ADDR="--advertise-address=192.168.10.15"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=192.168.11.0/24"


cat /usr/lib/systemd/system/kube-apiserver.service 

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/kubernetes/kubernetes


[Service]

EnvironmentFile=-/usr/local/kuber/cfg/kube-apiserver

#ExecStart=/usr/local/kube/bin/kube-apiserver ${KUBE_APISERVER_OPTS}

ExecStart=/usr/local/kuber/bin/kube-apiserver \

${KUBE_LOGTOSTDERR} \

${KUBE_LOG_LEVEL} \

${KUBE_ETCD_SERVERS} \

${KUBE_API_ADDRESS} \

${KUBE_API_PORT} \

${KUBE_ADVERTISE_ADDR} \

${KUBE_ALLOW_PRIV} \

${KUBE_SERVICE_ADDRESSES}

Restart=on-failure


[Install]

WantedBy=multi-user.target

You have new mail in /var/spool/mail/root

systemctl enable kube-apiserver &&  systemctl restart kube-apiserver


k8s1安装kube-scheduler

touch /usr/local/kuber/cfg/kube-scheduler

cat /usr/local/kuber/cfg/kube-scheduler 

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

KUBE_MASTER="--master=192.168.10.15:8080"

KUBE_LEADER_ELECT="--leader-elect"


touch /usr/lib/systemd/system/kube-scheduler.service

cat /usr/lib/systemd/system/kube-scheduler.service

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/usr/local/kuber/cfg/kube-scheduler

ExecStart=/usr/local/kuber/bin/kube-scheduler \

${KUBE_LOGTOSTDERR} \

${KUBE_LOG_LEVEL} \

${KUBE_MASTER} \

${KUBE_LEADER_ELECT}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl enable kube-scheduler && systemctl restart kube-scheduler


k8s1安装kube-controller-manager

touch /usr/local/kuber/cfg/kube-controller-manager

cat /usr/local/kuber/cfg/kube-controller-manager 

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

KUBE_MASTER="--master=192.168.10.15:8080"


touch /usr/lib/systemd/system/kube-controller-manager.service

cat /usr/lib/systemd/system/kube-controller-manager.service

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=-/usr/local/kuber/cfg/kube-controller-manager

ExecStart=/usr/local/kuber/bin/kube-controller-manager \

${KUBE_LOGTOSTDERR} \

${KUBE_LOG_LEVEL} \

${KUBE_MASTER} \

${KUBE_LEADER_ELECT}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl enable kube-controller-manager && systemctl restart kube-controller-manager


k8s_slave安装

swapoff  -a

cp -pv /etc/fstab /etc/fstab.bak

cat /etc/fstab

#UUID=b3465121-8e32-44e0-894c-ee76e987cd88 swap                    swap    defaults        0 0


systemctl stop firewalld && systemctl disable firewalld

systemctl stop NetworkManager  && systemctl disable NetworkManager

yum install -y yum-utils device-mapper-persistent-data lvm2 bridge-utils device-mapper docker-ce

systemctl daemon-reload

systemctl restart docker && systemctl enable docker


k8s2安装etcd存储

yum install -y etcd

cp -pv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bak

cat /etc/etcd/etcd.conf

ETCD_NAME="default"

ETCD_DATA_DIR="/var/lib/etcd/default"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379

systemctl start etcd && systemctl enable etcd

etcd --version 

etcd Version: 3.3.11


tar zxvf /root/kubernetes-node-linux-amd64.tar.gz

mv /root/kubernetes /usr/local/kuber

mkdir -pv /usr/local/kuber/{bin,cfg}

mv /usr/local/kuber/node/bin/{kubelet,kube-proxy} /usr/local/kuber/bin/

ln -s /usr/local/kuber/bin/* /bin/


k8s2安装kubelet

touch /usr/local/kuber/cfg/kubelet.kubeconfig

cat /usr/local/kuber/cfg/kubelet.kubeconfig 

apiVersion: v1

kind: Config

clusters:

  - cluster:

      server: http://192.168.10.15:8080

    name: local

contexts:

  - context:

      cluster: local

    name: local

current-context: local


touch /usr/local/kuber/cfg/kubelet

cat /usr/local/kuber/cfg/kubelet

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

NODE_ADDRESS="--address=192.168.10.11"

NODE_PORT="--port=10250"

NODE_HOSTNAME="--hostname-override=192.168.10.11"

KUBELET_KUBECONFIG="--kubeconfig=/usr/local/kuber/cfg/kubelet.kubeconfig"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBELET_DNS_IP="--cluster-dns=202.106.0.20"

KUBELET_DNS_DOMAIN="--cluster-domain=cluster.local"

KUBELET_SWAP="--fail-swap-on=false"


touch /usr/lib/systemd/system/kubelet.service

cat  /usr/lib/systemd/system/kubelet.service

[Unit]

Description=Kubernetes Kubelet

After=docker.service

Requires=docker.service


[Service]

EnvironmentFile=-/usr/local/kuber/cfg/kubelet

ExecStart=/usr/local/kuber/bin/kubelet \

${KUBE_LOGTOSTDERR} \

${KUBE_LOG_LEVEL} \

${NODE_ADDRESS} \

${NODE_PORT} \

${NODE_HOSTNAME} \

${KUBELET_KUBECONFIG} \

${KUBE_ALLOW_PRIV} \

${KUBELET_DNS_IP} \

${KUBELET_DNS_DOMAIN} \

${KUBELET_SWAP}

Restart=on-failure

KillMode=process


[Install]

WantedBy=multi-user.target

systemctl enable  kubelet && systemctl restart  kubelet


k8s2安装kube-proxy

touch /usr/local/kuber/cfg/kube-proxy

cat /usr/local/kuber/cfg/kube-proxy 

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=4"

NODE_HOSTNAME="--hostname-override=192.168.10.11"

KUBE_MASTER="--master=http://192.168.10.15:8080"


touch /usr/lib/systemd/system/kube-proxy.service

cat /usr/lib/systemd/system/kube-proxy.service 

[Unit]

Description=Kubernetes Proxy

After=network.target

[Service]

EnvironmentFile=-/usr/local/kuber/cfg/kube-proxy

ExecStart=/usr/local/kuber/bin/kube-proxy \

${KUBE_LOGTOSTDERR} \

${KUBE_LOG_LEVEL} \

${NODE_HOSTNAME} \

${KUBE_MASTER}

Restart=on-failure

[Install]

WantedBy=multi-user.target

systemctl enable kube-proxy  && systemctl restart kube-proxy


ip addr | grep docker0

docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 

    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0


ps -ef | grep docker

root        963      1  0 14:28 ?        00:00:00 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

root       4384   2329  0 14:56 pts/0    00:00:00 grep --color=auto docker

ps -ef | grep kube

root        494      1  0 14:28 ?        00:00:01 /usr/local/kuber/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.10.12 --master=http://192.168.10.15:8080

root       3945      1  1 14:54 ?        00:00:00 /usr/local/kuber/bin/kubelet

root       4203   2329  0 14:55 pts/0    00:00:00 grep --color=auto kube


netstat -tuplna | grep 2379

tcp        0      0 127.0.0.1:57906         127.0.0.1:2379          ESTABLISHED 2077/etcd           

tcp6       0      0 :::2379                 :::*                    LISTEN      2077/etcd           

tcp6       0      0 127.0.0.1:2379          127.0.0.1:57906         ESTABLISHED 2077/etcd 

netstat -tuplna | grep 8080 

tcp        0      0 192.168.10.15:59480     192.168.10.15:8080      ESTABLISHED 12042/kube-schedule 

tcp6       0      0 :::8080                 :::*                    LISTEN      11960/kube-apiserve 

tcp6       0      0 192.168.10.15:8080      192.168.10.15:59472     ESTABLISHED 11960/kube-apiserve 

tcp        0      0 192.168.10.15:59516     192.168.10.15:8080      ESTABLISHED 12162/kube-controll

netstat -tuplna | grep kube-proxy

tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      5910/kube-proxy     

tcp        0      0 192.168.10.14:33104     192.168.10.15:8080      ESTABLISHED 5910/kube-proxy     

tcp        0      0 192.168.10.14:33102     192.168.10.15:8080      ESTABLISHED 5910/kube-proxy     

tcp6       0      0 :::10256                :::*                    LISTEN      5910/kube-proxy

netstat -tuplna | grep kubelet

tcp        0      0 127.0.0.1:41975         0.0.0.0:*               LISTEN      5683/kubelet        

tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      5683/kubelet        

tcp6       0      0 :::10250                :::*                    LISTEN      5683/kubelet        

tcp6       0      0 :::10255                :::*                    LISTEN      5683/kubelet 

内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: