
01
Preface
kube-scheduler runs on the master nodes as a core control-plane component. It watches kube-apiserver for Pods that have not yet been scheduled and, for each one, runs its scheduling algorithm to pick the most suitable Node; the result (Pod name, Node name, and so on) is then written back through kube-apiserver into etcd to complete the scheduling. As with kube-controller-manager, high availability for kube-scheduler is achieved by running multiple instances that elect a leader among themselves.
Download the binary package https://dl.k8s.io/v1.17.0/kubernetes-server-linux-amd64.tar.gz and distribute it to all master node servers.
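A minimal sketch of unpacking the tarball and pushing the binary out, assuming env.sh defines MASTER_IPS (as in the scripts below) and that binaries live under /data/k8s/bin as referenced by the systemd unit later on:
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
# Unpack the server tarball downloaded above
tar -xzf kubernetes-server-linux-amd64.tar.gz
# Copy the kube-scheduler binary to every master node
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  scp kubernetes/server/bin/kube-scheduler root@${node_ip}:/data/k8s/bin/
  ssh root@${node_ip} "chmod +x /data/k8s/bin/kube-scheduler"
done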
02
Create and distribute the kubeconfig file
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  scp kube-scheduler.kubeconfig root@${node_ip}:/etc/kubernetes/
done
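Optionally, sanity-check the generated kubeconfig on the work machine; kubectl config view redacts the embedded certificate data unless --raw is passed, and get-contexts should show system:kube-scheduler as the current context:
kubectl config view --kubeconfig=kube-scheduler.kubeconfig
kubectl config get-contexts --kubeconfig=kube-scheduler.kubeconfig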
03
Create the kube-scheduler configuration file template
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
cat > kube-scheduler.yaml.template <<EOF
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
bindTimeoutSeconds: 600
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
  qps: 100
enableContentionProfiling: false
enableProfiling: true
hardPodAffinitySymmetricWeight: 1
healthzBindAddress: 127.0.0.1:10251
leaderElection:
  leaderElect: true
metricsBindAddress: ##NODE_IP##:10251
EOF
04
Render and distribute the configuration file
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
# Render the template once per master, substituting the node name and IP
for (( i=0; i < 3; i++ ))
do
  sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.yaml.template > kube-scheduler-${NODE_IPS[i]}.yaml
done
# Distribute the rendered files
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  scp kube-scheduler-${node_ip}.yaml root@${node_ip}:/etc/kubernetes/kube-scheduler.yaml
done
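A quick spot check that each master received a configuration rendered with its own IP; a small sketch reusing the same SSH loop conventions as above:
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  # metricsBindAddress should show this node's own IP after substitution
  ssh root@${node_ip} "grep -E 'metricsBindAddress|healthzBindAddress' /etc/kubernetes/kube-scheduler.yaml"
done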
05
Create the kube-scheduler systemd unit template
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
cat > kube-scheduler.service.template <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=${K8S_DIR}/kube-scheduler
ExecStart=/data/k8s/bin/kube-scheduler \\
  --config=/etc/kubernetes/kube-scheduler.yaml \\
  --bind-address=##NODE_IP## \\
  --secure-port=10259 \\
  --port=10251 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \\
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="aggregator" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
EOF
Startup flags explained
| Flag | Description |
| --- | --- |
| --config=/etc/kubernetes/kube-scheduler.yaml | Path to the configuration file |
| --bind-address=##NODE_IP## | The IP address on which to listen for the secure port |
| --secure-port=10259 | The port on which to serve HTTPS with authentication and authorization; set to 0 to disable HTTPS serving |
| --port=10251 | The port on which to serve insecure HTTP without authentication or authorization; set to 0 to disable it |
| --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem | File containing the default x509 certificate for HTTPS (any CA certificates are concatenated after the server certificate). If HTTPS serving is enabled and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory given by --cert-dir |
| --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem | File containing the default x509 private key matching --tls-cert-file |
| --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig | kubeconfig file that kube-scheduler uses when acting as a client of kube-apiserver for authentication checks |
| --client-ca-file=/etc/kubernetes/cert/ca.pem | If set, any request presenting a client certificate signed by one of the authorities in client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate |
| --requestheader-allowed-names="aggregator" | List of client certificate common names allowed to provide usernames via the headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed |
| --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem | Root certificate bundle used to verify client certificates on incoming requests before trusting usernames in the headers specified by --requestheader-username-headers. WARNING: generally do not assume that authorization has already been done for incoming requests |
| --requestheader-extra-headers-prefix="X-Remote-Extra-" | List of request header prefixes to inspect. X-Remote-Extra- is suggested |
| --requestheader-group-headers=X-Remote-Group | List of request headers to inspect for groups. X-Remote-Group is suggested |
| --requestheader-username-headers=X-Remote-User | List of request headers to inspect for usernames. X-Remote-User is common |
| --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig | kubeconfig file pointing at the 'core' Kubernetes server with enough rights to create subjectaccessreviews.authorization.k8s.io. This is optional; if empty, all requests not skipped by authorization are forbidden |
| --logtostderr=true | Log to standard error instead of files |
| --v=2 | Number for the log level verbosity |
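Once the service is running (section 07 below), one way to confirm that the secure port really serves the certificate named by --tls-cert-file is to inspect it with openssl; a sketch against 192.168.16.104, one of the masters shown in the verification output later:
echo | openssl s_client -connect 192.168.16.104:10259 2>/dev/null | openssl x509 -noout -subject -issuer -dates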
06
Render and distribute the systemd unit
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
# Render the unit template once per master
for (( i=0; i < 3; i++ ))
do
  sed -e "s/##NODE_NAME##/${NODE_NAMES[i]}/" -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-scheduler.service.template > kube-scheduler-${NODE_IPS[i]}.service
done
# Distribute the rendered unit files
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  scp kube-scheduler-${node_ip}.service root@${node_ip}:/etc/systemd/system/kube-scheduler.service
done
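Optionally, confirm that each node's unit carries its own IP in --bind-address after substitution; same loop conventions as above:
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  # the -- stops grep from reading the pattern as an option
  ssh root@${node_ip} "grep -- '--bind-address' /etc/systemd/system/kube-scheduler.service"
done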
07
Start kube-scheduler
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  ssh root@${node_ip} "mkdir -p ${K8S_DIR}/kube-scheduler"
  ssh root@${node_ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler"
done
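If an instance fails to come up, the journal is the quickest place to look; a small status-check sketch following the same pattern:
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  # Print the unit state; if it is not active, show the last few log lines
  ssh root@${node_ip} "systemctl is-active kube-scheduler || journalctl -u kube-scheduler --no-pager | tail -n 20"
done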
08
Verify the process
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  ssh root@${node_ip} "netstat -antp | grep kube-schedule | grep LISTEN | grep -v grep"
done
Verification output
>>> 192.168.16.104
tcp 0 0 192.168.16.104:10251 0.0.0.0:* LISTEN 24327/kube-schedule
tcp 0 0 127.0.0.1:10251 0.0.0.0:* LISTEN 24327/kube-schedule
tcp 0 0 192.168.16.104:10259 0.0.0.0:* LISTEN 24327/kube-schedule
>>> 192.168.16.105
tcp 0 0 192.168.16.105:10251 0.0.0.0:* LISTEN 24448/kube-schedule
tcp 0 0 127.0.0.1:10251 0.0.0.0:* LISTEN 24448/kube-schedule
tcp 0 0 192.168.16.105:10259 0.0.0.0:* LISTEN 24448/kube-schedule
>>> 192.168.16.106
tcp 0 0 192.168.16.106:10251 0.0.0.0:* LISTEN 15659/kube-schedule
tcp 0 0 127.0.0.1:10251 0.0.0.0:* LISTEN 15659/kube-schedule
tcp 0 0 192.168.16.106:10259 0.0.0.0:* LISTEN 15659/kube-schedule
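Listening sockets are a good first sign; beyond that, the insecure port 10251 serves /healthz (bound to 127.0.0.1 via healthzBindAddress) and /metrics (bound to the node IP via metricsBindAddress), so a curl-based spot check is possible, assuming curl is available on the masters:
#!/bin/bash
cd /data/k8s/work
source /data/k8s/bin/env.sh
for node_ip in ${MASTER_IPS[@]}
do
  echo ">>> ${node_ip}"
  # healthz is bound to loopback, so query it from the node itself; it should return "ok"
  ssh root@${node_ip} "curl -s http://127.0.0.1:10251/healthz && echo"
  # metrics are bound to the node IP and can be queried remotely
  curl -s http://${node_ip}:10251/metrics | head -n 5
done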
09
Check the current kube-scheduler leader
[root@master01 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master03.k8s.vip_1cd7c2d0-67c5-4dec-917f-8b3d504072a5","leaseDurationSeconds":15,"acquireTime":"2020-01-28T09:06:30Z","renewTime":"2020-01-28T09:15:54Z","leaderTransitions":3}'
  creationTimestamp: "2019-12-28T13:59:00Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "7272253"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: f5d06d23-dcb8-459d-881d-6bd1003dcf71
[root@master01 ~]#
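The holderIdentity above shows that master03.k8s.vip currently holds the lease. A simple way to confirm that failover works is to stop the scheduler on the current leader, watch the lease move to another instance within roughly leaseDurationSeconds (15s here), and then start it again; a sketch using this environment's node names:
# On the current leader (master03 in this example)
systemctl stop kube-scheduler

# From any master: holderIdentity should switch to another node shortly
kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml | grep holderIdentity

# Bring the stopped instance back so all three masters run a scheduler again
systemctl start kube-scheduler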
10
Summary
kube-scheduler serves an insecure port, 10251, and a secure port, 10259;
kube-scheduler is deployed across 3 nodes for high availability, with a leader chosen by election;
it watches the kube-apiserver watch interface for unscheduled Pods, runs the predicate (filtering) and priority (scoring) phases to find the best-fit node, and then binds the Pod to that node.
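As one last sanity check that ties the summary together, the apiserver's componentstatuses view (still available in v1.17, deprecated in later releases) reports the scheduler's health by probing its local healthz endpoint; the scheduler line should show Healthy:
kubectl get componentstatuses    # "kubectl get cs" is the short form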

Previous posts
Part 3: PKI basics, the cfssl tool, and certificates in Kubernetes
Part 7: Verifying kube-apiserver and a detailed look at kubeconfig configuration
Part 8: Installing and verifying kube-controller-manager




