Installing a Multi-Master Kubernetes Cluster with kubeadm (1)

2022/7/7 23:21:53

This article walks through installing a multi-master (highly available) Kubernetes cluster with kubeadm; it should serve as a practical reference for anyone building a similar setup.

1. Environment Preparation

Cluster role    IP             Hostname   Installed components
Control plane   192.168.1.10   master     apiserver, controller-manager, scheduler, kubelet, etcd, docker, kube-proxy, keepalived, nginx, calico
Control plane   192.168.1.11   pod1       apiserver, controller-manager, scheduler, kubelet, etcd, docker, kube-proxy, keepalived, nginx, calico
Worker node     192.168.1.12   pod2       kubelet, kube-proxy, docker, calico, coredns
VIP             192.168.1.20

# Preparation commands
# 1. Set the hostname and configure a static IP (repeat on each node with its own name)
hostnamectl set-hostname master && bash

# 2. Configure /etc/hosts on every host
vi /etc/hosts
192.168.1.10 master
192.168.1.11 pod1
192.168.1.12 pod2

# 3. Configure passwordless SSH trust between hosts
ssh-keygen -t rsa
ssh-copy-id master
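To trust all three machines, the key needs to be copied to each host in turn; a minimal sketch, assuming root is used on every node and the hostnames resolve via /etc/hosts:

# Run on each node: copy the local public key to every host
for host in master pod1 pod2; do
  ssh-copy-id root@${host}
done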

# 4. Disable swap
swapoff -a  # temporary; lasts until reboot
# To disable it permanently, comment out the swap line in /etc/fstab
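One non-interactive way to comment out that line (a sketch; double-check /etc/fstab afterwards):

sed -i '/\sswap\s/s/^/#/' /etc/fstab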

# 5. Adjust kernel parameters
modprobe br_netfilter
echo "modprobe br_netfilter" >> /etc/profile

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf

# 6. Stop and disable the firewall
systemctl stop firewalld ; systemctl disable firewalld

# 7. Disable SELinux; reboot after changing the config file
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
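To avoid waiting for a reboot, SELinux can also be switched to permissive for the current session:

setenforce 0   # takes effect immediately; the config file change applies after reboot
getenforce     # verify the current mode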

# 8. Configure Aliyun yum repositories
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast

# 9. Configure the Kubernetes yum repository (saved as /etc/yum.repos.d/kubernetes.repo)
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# 10. Synchronize the time now and keep it in sync via cron
yum install ntpdate -y
ntpdate time1.aliyun.com
# Add to root's crontab (crontab -e) to resync every hour (minute field 0, not *)
0 */1 * * * /usr/sbin/ntpdate time1.aliyun.com
systemctl restart crond

# 11. Enable ipvs support (used by kube-proxy); save this script as /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done

[root@master ~]#chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 

# ipvs (IP Virtual Server) implements transport-layer (layer-4) load balancing as part of the Linux kernel. Running on a host in front of a cluster of real servers, ipvs forwards TCP- and UDP-based service requests to the real servers and makes them appear as a single virtual service on one IP address.
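Once kube-proxy is running in ipvs mode (configured later via KubeProxyConfiguration), the virtual servers it programs can be inspected with ipvsadm, which is installed in the next section:

ipvsadm -Ln   # list IPVS virtual servers and their real servers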

2. Installing Base Packages

yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat conntrack ntpdate telnet ipvsadm iptables-services
# Stop the iptables service and disable it at boot
service iptables stop && systemctl disable iptables
# Flush the rules
iptables -F
  • Installing the docker service
# 1. Install docker-ce (pinning a specific version is optional)
yum install docker-ce-20.10.17 docker-ce-cli-20.10.17 containerd.io -y

# 2. Start docker and enable it at boot
systemctl start docker && systemctl enable docker && systemctl status docker

# 3. Configure registry mirror accelerators
mkdir /etc/docker
vi /etc/docker/daemon.json
{
"registry-mirrors":["https://pft7f97f.mirror.aliyuncs.com", "https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
# exec-opts sets the cgroup driver
This switches docker's cgroup driver to systemd (the default is cgroupfs); kubelet defaults to systemd and the two must match.
systemctl daemon-reload && systemctl restart docker && systemctl status docker
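A quick check that the cgroup driver change took effect:

docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd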
  • Packages required for k8s initialization
# Install the packages (generally not the very latest version)
yum install -y  kubelet-1.20.7 kubeadm-1.20.7 kubectl-1.20.7

systemctl enable kubelet && systemctl start kubelet && systemctl status kubelet

Notes:
kubeadm: a tool used to bootstrap (initialize) a k8s cluster
kubelet: installed on every node of the cluster; responsible for starting Pods
kubectl: used to deploy and manage applications, inspect resources, and create, delete and update components

# At this point kubelet cannot start yet; it will come up automatically once the cluster is initialized
Active: activating (auto-restart) (Result: exit-code)
  • Making the k8s apiserver highly available with keepalived + nginx
# 1. Install nginx and keepalived on both master and pod1
yum install nginx keepalived -y

# 2. Edit the nginx configuration file (/etc/nginx/nginx.conf)
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

stream {

    log_format main '$remote_addr $upstream_addr-[$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server  192.168.1.10:6443; # Master1 APISERVER IP:PORT
        server  192.168.1.11:6443; # Master2 APISERVER IP:PORT
    }
    server {
        listen 16443;
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '$remote_addr - - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80 default_server;
        server_name _;
        location / {
        }
    }
}

# nginx installed from yum fails the config test with: unknown directive "stream" (the stream module is missing)
# Fix: yum install nginx-mod-stream -y
# (For a source build, add the --with-stream configure option instead)
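After installing the stream module, validate the configuration before starting nginx:

nginx -t   # expect: syntax is ok / test is successful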

# keepalived.conf configuration
# master:
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33        # actual NIC name
    virtual_router_id 51   # VRRP router instance ID; unique per instance
    priority 100           # priority; set to 90 on the backup server
    advert_int 1           # VRRP advertisement (heartbeat) interval, default 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
# Virtual IP (VIP)
    virtual_ipaddress {
        192.168.1.20/24
    }
    track_script {
        check_nginx
    }
}

# vrrp_script: points to the script that checks nginx's health (its result decides whether to fail over)

# keepalived.conf on pod1
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_BACKUP
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.20/24
    }
    track_script {
        check_nginx
    }
}


# The nginx health-check script, /etc/keepalived/check_nginx.sh
#!/bin/bash
# Count running nginx processes (excluding the grep itself and this script)
count=$(ps -ef | grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
  # No nginx running: stop keepalived so the VIP fails over to the backup
  systemctl stop keepalived
fi

chmod +x /etc/keepalived/check_nginx.sh
# Note: keepalived decides whether to fail over based on the script's exit code (0 = healthy, non-zero = unhealthy)
# Start the services
systemctl daemon-reload
systemctl start nginx && systemctl enable nginx && systemctl status nginx
systemctl start keepalived && systemctl enable keepalived && systemctl status keepalived

# On master the interface shows two IPs; on pod1 only one

# ip addr

2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:09:d2:4e brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.10/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.20/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::7088:c179:979e:a672/64 scope link noprefixroute
       valid_lft forever preferred_lft forever

Test: stop nginx on master and the VIP 192.168.1.20 floats over to pod1; after restarting nginx and keepalived on master, the IP floats back to master.
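A minimal way to run that failover test from the command line (assumes the interface name ens33 used above):

# On master: simulate a failure
systemctl stop nginx
ip addr show ens33 | grep 192.168.1.20   # the VIP should disappear here
# On pod1:
ip addr show ens33 | grep 192.168.1.20   # the VIP should now be present
# Recover on master:
systemctl start nginx && systemctl restart keepalived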

3. Initializing the k8s Cluster with kubeadm

On the primary control-plane node (master):

# Create kubeadm-config.yaml on master
# Uses the images prepared in advance
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.6
controlPlaneEndpoint: 192.168.1.20:16443
imageRepository: registry.aliyuncs.com/google_containers

apiServer:
  certSANs:
    - 192.168.1.10
    - 192.168.1.11
    - 192.168.1.12
    - 192.168.1.20

networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.10.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

"""
注: --image-repository registry.aliyuncs.com/google_containers : 手动指定仓库地址为
registry.aliyuncs.com/google_containers kubeadm  默认从k8s.grc.io  拉取镜像,但是k8s.grc.io访问不到,所以需要指定从registry.aliyuncs.com/google_containers  仓库拉取镜像 。
"""

# The images kubeadm needs can be prepared in advance and imported; here a pre-built bundle is used
# Import the prepared images on master and pod1:
docker load -i k8simage-1-20-6.tar.gz
[root@master ~]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.20.6    9a1ebfd8124d   14 months ago   118MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.20.6    b93ab2ec4475   14 months ago   47.3MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.20.6    b05d611c1af9   14 months ago   122MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.20.6    560dd11d4550   14 months ago   116MB
calico/pod2daemon-flexvol                                         v3.18.0    2a22066e9588   16 months ago   21.7MB
calico/node                                                       v3.18.0    5a7c4970fbc2   16 months ago   172MB
calico/cni                                                        v3.18.0    727de170e4ce   16 months ago   131MB
calico/kube-controllers                                           v3.18.0    9a154323fbf7   16 months ago   53.4MB
registry.aliyuncs.com/google_containers/etcd                      3.4.13-0   0369cf4303ff   22 months ago   253MB
registry.aliyuncs.com/google_containers/coredns                   1.7.0      bfe3a36ebd25   2 years ago     45.2MB
registry.aliyuncs.com/google_containers/pause                     3.2        80d28bedfe5d   2 years ago     683kB
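If the nodes can reach the mirror registry directly, an alternative to importing a tarball is to let kubeadm pre-pull the images defined in the config (a sketch, run on each control-plane node):

kubeadm config images pull --config kubeadm-config.yaml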

Calico component documentation:
https://projectcalico.docs.tigera.io/calico-enterprise/

# Initialize the cluster
kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors=SystemVerification
# Output:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
# Needed for pod1 to join as an additional control plane
  kubeadm join 192.168.1.20:16443 --token dmk0g4.1l9kz4el5ewkhy57 \
    --discovery-token-ca-cert-hash sha256:6e220a97f3d79d0b53b5ac18979dcfacdfb5da5ce0629017b745a8a4df162d27 \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:
# Join command for worker nodes
kubeadm join 192.168.1.20:16443 --token dmk0g4.1l9kz4el5ewkhy57 \
    --discovery-token-ca-cert-hash sha256:6e220a97f3d79d0b53b5ac18979dcfacdfb5da5ce0629017b745a8a4df162d27 
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE   VERSION
master   NotReady   control-plane,master   11h   v1.20.7
The node is still NotReady at this point because no network plugin has been installed yet.

4. Scaling the Cluster: Adding Another Master Node

On the new control-plane node (pod1):

# pod1:
# 1. Create the certificate directories
mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

# 2. Copy the certificates from master into the corresponding directories on pod1
[root@master pki]# cd /etc/kubernetes/pki/
[root@master pki]# scp ca.crt sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key ca.key root@pod1:/etc/kubernetes/pki/

[root@master etcd]# cd /etc/kubernetes/pki/etcd/
[root@master etcd]# scp ca.crt ca.key root@pod1:/etc/kubernetes/pki/etcd/

# 3. On master, generate a token and print the join command
[root@master ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.20:16443 --token lrbume.ymtfk5o4tvcd6cg2     --discovery-token-ca-cert-hash sha256:6e220a97f3d79d0b53b5ac18979dcfacdfb5da5ce0629017b745a8a4df162d27

# 4. On pod1, run the join command with --control-plane to join as a master
kubeadm join 192.168.1.20:16443 --token lrbume.ymtfk5o4tvcd6cg2 --discovery-token-ca-cert-hash sha256:6e220a97f3d79d0b53b5ac18979dcfacdfb5da5ce0629017b745a8a4df162d27 --control-plane

On success: Run 'kubectl get nodes' to see this node join the cluster.

[root@pod1 ~]# mkdir -p $HOME/.kube
[root@pod1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@pod1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@pod1 ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE   VERSION
master   NotReady   control-plane,master   11h   v1.20.7
pod1     NotReady   control-plane,master   43s   v1.20.7

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   12h     v1.20.7
pod1     NotReady   control-plane,master   2m33s   v1.20.7
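Note: copying the certificates around with scp (step 2 above) can also be avoided by letting kubeadm distribute them. A hedged sketch; the actual token, hash and certificate key are printed by the commands themselves:

# On master: re-upload the control-plane certificates and print a certificate key
kubeadm init phase upload-certs --upload-certs
# On pod1: join as a control plane using that key instead of copying files
kubeadm join 192.168.1.20:16443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>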

5. Adding a Worker Node to the Cluster

# Images required on the pod2 node (imported in advance)
[root@pod2 ~]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.20.7    ff54c88b8ecf   14 months ago   118MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.20.7    034671b24f0f   14 months ago   122MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.20.7    22d1a2072ec7   14 months ago   116MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.20.7    38f903b54010   14 months ago   47.3MB
calico/pod2daemon-flexvol                                         v3.18.0    2a22066e9588   16 months ago   21.7MB
calico/node                                                       v3.18.0    5a7c4970fbc2   16 months ago   172MB
calico/cni                                                        v3.18.0    727de170e4ce   16 months ago   131MB
calico/kube-controllers                                           v3.18.0    9a154323fbf7   16 months ago   53.4MB
registry.aliyuncs.com/google_containers/etcd                      3.4.13-0   0369cf4303ff   22 months ago   253MB
registry.aliyuncs.com/google_containers/coredns                   1.7.0      bfe3a36ebd25   2 years ago     45.2MB
registry.aliyuncs.com/google_containers/pause                     3.2        80d28bedfe5d   2 years ago     683kB

# Join the node to the cluster
kubeadm join 192.168.1.20:16443 --token lrbume.ymtfk5o4tvcd6cg2 --discovery-token-ca-cert-hash sha256:6e220a97f3d79d0b53b5ac18979dcfacdfb5da5ce0629017b745a8a4df162d27
On success:
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

# Check on master
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE   VERSION
master   NotReady   control-plane,master   12h   v1.20.7
pod1     NotReady   control-plane,master   10m   v1.20.7
pod2     NotReady   <none>                 47s   v1.20.7

# In 1.20 worker nodes show ROLES as <none>; if desired, label them as worker
[root@master ~]# kubectl label node pod2 node-role.kubernetes.io/worker=worker
# Check the component pods
# Default namespace
[root@master ~]# kubectl get pods
No resources found in default namespace.

# The coredns pods are Pending because no network plugin is installed yet; -o wide shows which node each pod runs on
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
coredns-7f89b7bc75-bflk2         0/1     Pending   0          12h   <none>         <none>   <none>           <none>
coredns-7f89b7bc75-z4k77         0/1     Pending   0          12h   <none>         <none>   <none>           <none>
etcd-master                      1/1     Running   1          12h   192.168.1.10   master   <none>           <none>
etcd-pod1                        1/1     Running   0          24m   192.168.1.11   pod1     <none>           <none>
kube-apiserver-master            1/1     Running   1          12h   192.168.1.10   master   <none>           <none>
kube-apiserver-pod1              1/1     Running   0          24m   192.168.1.11   pod1     <none>           <none>
kube-controller-manager-master   1/1     Running   2          12h   192.168.1.10   master   <none>           <none>
kube-controller-manager-pod1     1/1     Running   0          24m   192.168.1.11   pod1     <none>           <none>
kube-proxy-8mt7s                 1/1     Running   0          14m   192.168.1.12   pod2     <none>           <none>
kube-proxy-bqt8c                 1/1     Running   0          24m   192.168.1.11   pod1     <none>           <none>
kube-proxy-vwb7g                 1/1     Running   1          12h   192.168.1.10   master   <none>           <none>
kube-scheduler-master            1/1     Running   2          12h   192.168.1.10   master   <none>           <none>
kube-scheduler-pod1              1/1     Running   0          24m   192.168.1.11   pod1     <none>           <none>

6. Installing the Network Plugin: Calico

# 1. Note: the manifest can be downloaded from: https://docs.projectcalico.org/manifests/calico.yaml

# Calico is feature-rich, performs well, and supports network policies
# 2. Install
wget https://docs.projectcalico.org/manifests/calico.yaml --no-check-certificate
[root@master ~]# kubectl apply -f calico.yaml 

# Error: the calico version does not match this Kubernetes version
error: unable to recognize "calico.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1"
# Fix: use a calico.yaml that matches the cluster version
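For example, a manifest matching the v3.18 calico images loaded earlier can usually be fetched from the versioned archive (the URL pattern below is an assumption; adjust it to the version actually in use):

wget https://docs.projectcalico.org/archive/v3.18/manifests/calico.yaml --no-check-certificate
kubectl apply -f calico.yaml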

# 3. Verify
# Once the install completes, the coredns pods change to Running
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
calico-kube-controllers-6949477b58-c8vzf   1/1     Running   0          2m43s   10.244.219.65   master   <none>           <none>
calico-node-2wqck                          1/1     Running   0          2m43s   192.168.1.12    pod2     <none>           <none>
calico-node-9898g                          1/1     Running   0          2m43s   192.168.1.10    master   <none>           <none>
calico-node-jstb4                          1/1     Running   0          2m43s   192.168.1.11    pod1     <none>           <none>
coredns-7f89b7bc75-bflk2                   1/1     Running   0          12h     10.244.219.67   master   <none>           <none>
coredns-7f89b7bc75-z4k77                   1/1     Running   0          12h     10.244.219.66   master   <none>           <none>
etcd-master                                1/1     Running   1          12h     192.168.1.10    master   <none>           <none>
etcd-pod1                                  1/1     Running   0          48m     192.168.1.11    pod1     <none>           <none>
kube-apiserver-master                      1/1     Running   1          12h     192.168.1.10    master   <none>           <none>
kube-apiserver-pod1                        1/1     Running   0          48m     192.168.1.11    pod1     <none>           <none>
kube-controller-manager-master             1/1     Running   2          12h     192.168.1.10    master   <none>           <none>
kube-controller-manager-pod1               1/1     Running   0          48m     192.168.1.11    pod1     <none>           <none>
kube-proxy-8mt7s                           1/1     Running   0          38m     192.168.1.12    pod2     <none>           <none>
kube-proxy-bqt8c                           1/1     Running   0          48m     192.168.1.11    pod1     <none>           <none>
kube-proxy-vwb7g                           1/1     Running   1          12h     192.168.1.10    master   <none>           <none>
kube-scheduler-master                      1/1     Running   2          12h     192.168.1.10    master   <none>           <none>
kube-scheduler-pod1                        1/1     Running   0          48m     192.168.1.11    pod1     <none>           <none>
# Node status changes from NotReady to Ready
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE   VERSION
master   Ready    control-plane,master   12h   v1.20.7
pod1     Ready    control-plane,master   49m   v1.20.7
pod2     Ready    <none>                 39m   v1.20.7

Test whether pods created in the cluster can reach the network:

# On master, pull an image and test networking from inside a pod
docker search busybox
docker pull busybox
[root@master ~]# kubectl run busybox --image busybox:latest --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # ping baidu.com
PING baidu.com (220.181.38.148): 56 data bytes
64 bytes from 220.181.38.148: seq=0 ttl=127 time=29.778 ms
# The pod can reach the outside network, so the calico network plugin is installed and working

[root@master ~]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -sh
# Error:
Unable to connect to the server: dial tcp: lookup h on 114.114.114.114:53: no such host
# Cause: '-sh' should be '-- sh'; there must be a space between '--' and 'sh'

[root@pod1 ~]# kubectl get pods -o wide
NAME      READY   STATUS    RESTARTS   AGE     IP               NODE   NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          2m59s   10.244.145.193   pod2   <none>           <none>

Test deploying a tomcat service in the k8s cluster

# Load the tomcat image on pod2
[root@pod2 ~]# docker load -i tomcat.tar.gz

# Run on master
[root@master ~]# cat tomcat.yaml 
apiVersion: v1  # Pod is part of the core v1 API group
kind: Pod  # the resource being created is a Pod
metadata:  # metadata
  name: demo-pod  # pod name
  namespace: default  # namespace the pod belongs to
  labels:
    app: myapp  # pod label
    env: dev    # pod label
spec:
  containers:      # container list; multiple entries (name:) are allowed
  - name: tomcat-pod-java  # container name
    ports:
    - containerPort: 8080
    image: tomcat:8.5-jre8-alpine   # image used by the container
    imagePullPolicy: IfNotPresent

[root@master ~]# kubectl apply -f tomcat.yaml
[root@master ~]# kubectl get pods -o wide
NAME       READY   STATUS    RESTARTS   AGE   IP               NODE   NOMINATED NODE   READINESS GATES
demo-pod   1/1     Running   0          51s   10.244.145.194   pod2   <none>           <none>
[root@master ~]# curl -I 10.244.145.194:8080
HTTP/1.1 200 
Content-Type: text/html;charset=UTF-8
Transfer-Encoding: chunked
Date: Thu, 07 Jul 2022 04:47:25 GMT

# 10.244.145.194 is only reachable from inside the cluster; to access it externally, create a Service
[root@master ~]# cat tomcat-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: tomcat
spec:
  type: NodePort
  ports:
    - port: 8080
      nodePort: 30080  # host port 30080 maps to container port 8080
  selector:
    app: myapp
    env: dev

[root@master ~]# kubectl apply -f tomcat-service.yaml 
service/tomcat created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.10.0.1      <none>        443/TCP          13h
tomcat       NodePort    10.10.13.139   <none>        8080:30080/TCP   9s
# Test again via the NodePort
[root@master ~]# curl -I 192.168.1.10:30080
HTTP/1.1 200 
Content-Type: text/html;charset=UTF-8
Transfer-Encoding: chunked
Date: Thu, 07 Jul 2022 04:56:04 GMT
Browser access:
http://192.168.1.10:30080


7. Testing that coredns Works and Can Resolve Names

# Check the coredns service IP
[root@master ~]# kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.10.0.10   <none>        53/UDP,53/TCP,9153/TCP   13h

[root@master ~]# kubectl run busybox --image busybox:latest --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ #  nslookup kubernetes.default.svc.cluster.local
Server:        10.10.0.10
Address:    10.10.0.10:53

Name:    kubernetes.default.svc.cluster.local
Address: 10.10.0.1

*** Can't find kubernetes.default.svc.cluster.local: No answer

/ # nslookup tomcat.default.svc.cluster.local
Server:        10.10.0.10
Address:    10.10.0.10:53

Name:    tomcat.default.svc.cluster.local
Address: 10.10.13.139

# 10.10.0.10 is the coredns service IP and 10.10.13.139 is the tomcat service IP, so coreDNS is set up correctly
# Internal Service names are resolved through coreDNS
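Because the pod's /etc/resolv.conf includes the cluster search domains, the short service name normally resolves too; a quick extra check from the same busybox pod:

/ # nslookup tomcat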

 


