Container Adaptation for Domestic (Chinese) Platforms

Kubernetes Adaptation on Sunway (申威)

Posted by 爱折腾的工程师 on Sunday, September 29, 2019

Physical Environment

Two machines

CPU architecture: sw_64

  • 10.10.30.211 NeoKylin (中标麒麟) 4.4.15-aere+ (CentOS-based)
  • 10.10.30.212 Deepin (深度系统) 15.1 kui (Debian 8.0-based)

System Initialization

10.10.30.211 (NeoKylin)

Configure the yum repository

[root@node211 yum.repos.d]# cat /etc/yum.repos.d/neokylin.repo
[neokylin]
name= NeoKylin 7.0
#baseurl=http://download.cs2c.com.cn/neokylin/desktop/releases/7.0/$basearch/os/
baseurl=http://download.cs2c.com.cn/neokylin/server/everything/7.2/$basearch
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-neokylin-$basearch

Configure a yum proxy

[root@node211 yum.repos.d]# cat /etc/yum.conf
[main]
proxy=http://10.10.30.211:18888  # appended
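
After configuring the repository and the proxy, it is worth refreshing the metadata cache to confirm that the repo is reachable through the proxy. A minimal check, assuming the repo id "neokylin" from the file above:

yum clean all
yum makecache
yum repolist enabled | grep -i neokylin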

10.10.30.212 (Deepin)

Configure the APT sources

root@node212:~# cat /etc/apt/sources.list
deb [trusted=yes] http://as4s.packages.deepin.com kui main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com kui-backports main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com kui-security main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com kui-updates main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com stable main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com stable-backports main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com stable-security main contrib non-free
deb [trusted=yes] http://as4s.packages.deepin.com stable-updates main contrib non-free
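
With the sources in place, a quick sanity check (not part of the original steps) confirms that APT can reach the mirror and sees the docker.io package for this architecture:

apt-get update
apt-cache policy docker.io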

Docker Runtime Verification

10.10.30.211 (NeoKylin)

Install Docker

yum install docker
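
The post does not show starting the daemon explicitly; assuming NeoKylin 7 follows the usual CentOS 7 systemd layout, the service can be enabled and started like this:

systemctl enable docker
systemctl start docker
systemctl status docker --no-pager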

Check the Docker version

[root@node211 yum.repos.d]# docker info
Containers: 0
 Running: 0
 Paused: 0
 Stopped: 0
Images: 0
Server Version: 1.12.2
Storage Driver: devicemapper
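
Docker 1.12.2 with the devicemapper storage driver is old but usable. To confirm that sw_64 containers actually run on this node as well, the same minideb image used later on node212 could be tried here, assuming node211 can reach harbor.sh.deepin.com:

docker pull harbor.sh.deepin.com/library/minideb:latest
docker run --rm harbor.sh.deepin.com/library/minideb uname -m   # should print sw_64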

10.10.30.212 (Deepin)

root@node212:~# lsb_release -a
No LSB modules are available.
Distributor ID:	deepin
Description:	deepin GNU/Linux 15.1 (kui)
Release:	15.1
Codename:	kui

Install Docker and kubeadm

apt-get update
apt-get install docker.io kubeadm
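
A quick check that both binaries were installed and report sensible versions:

docker --version
kubeadm version -o short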

Pull a test image

docker pull harbor.sh.deepin.com/library/minideb:latest

Run a container

docker run -d -it harbor.sh.deepin.com/library/minideb sh
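
To confirm the container actually started, list it and run a command inside it (using the ID of the most recently created container):

docker ps --latest
docker exec $(docker ps -lq) uname -m   # should print sw_64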

Kubernetes Runtime Verification

10.10.30.211 (NeoKylin)

Check the available Kubernetes version

[root@node211 ~]# yum list |grep kubernetes
kubernetes.sw_64                1.2.0-0.27.git4a3f9c5.ns7.3
kubernetes-client.sw_64         1.2.0-0.27.git4a3f9c5.ns7.3
kubernetes-devel.noarch         1.2.0-0.27.git4a3f9c5.ns7.3
kubernetes-master.sw_64         1.2.0-0.27.git4a3f9c5.ns7.3
kubernetes-node.sw_64           1.2.0-0.27.git4a3f9c5.ns7.3
kubernetes-unit-test.sw_64      1.2.0-0.27.git4a3f9c5.ns7.3

Note: the Kubernetes version (1.2.0) in the default yum repository is far too old, so skip installing it for now.

10.10.30.212 (Deepin)

Disable swap; reference: http://www.iceyao.com.cn/2017/12/05/Kubeadm%E5%AE%89%E8%A3%85Kubernetes1.8.4/
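
The linked post covers this in detail; in short, kubeadm's preflight check (and the kubelet by default) complains when swap is enabled, so turn it off and make the change persistent. A minimal sketch:

swapoff -a                               # disable swap immediately
sed -i '/\sswap\s/ s/^/#/' /etc/fstab    # comment out swap entries so it stays off after reboot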

Install Kubernetes with kubeadm

kubeadm init --pod-network-cidr=10.244.0.0/16 \
    --token-ttl 0 \
    --apiserver-advertise-address=10.10.30.212 \
    --ignore-preflight-errors='Swap,SystemVerification' \
    --kubernetes-version=v1.13.5
mkdir -p /etc/cni/net.d/
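
After init completes, point kubectl at the new cluster and, since this is a single-node setup, allow workloads to schedule on the control-plane node. The taint name below is the 1.13-era default; skip that step if kubeadm did not apply it:

mkdir -p $HOME/.kube
cp /etc/kubernetes/admin.conf $HOME/.kube/config
kubectl taint nodes node212 node-role.kubernetes.io/master- || true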

Apply the flannel CNI

root@node212:~# vim kube-flannel.yml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "host-gw"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: sw64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: harbor.sh.deepin.com/sunway/flannel-sw64:v0.10.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: harbor.sh.deepin.com/sunway/flannel-sw64:v0.10.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg

The default kernel has no vxlan support, so flannel is configured to use host-gw mode (the "Backend" type in net-conf.json above).
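
To double-check that vxlan really is unavailable before settling on host-gw, try loading the module; on this kernel the modprobe is expected to fail:

modprobe vxlan && echo "vxlan available" || echo "no vxlan support"
lsmod | grep vxlan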

kubectl apply -f kube-flannel.yml

Verify that cluster pods are healthy

root@node212:~# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
node212   Ready    <none>   3h12m   v1.13.5-25+088c3a696a5f8d
root@node212:~# kubectl get pod --all-namespaces
NAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE
kube-system   coredns-864c657549-7j4l2          1/1     Running   0          49m
kube-system   coredns-864c657549-rcz6r          1/1     Running   0          49m
kube-system   etcd-node212                      1/1     Running   0          3h8m
kube-system   kube-apiserver-node212            1/1     Running   0          3h8m
kube-system   kube-controller-manager-node212   1/1     Running   0          159m
kube-system   kube-flannel-ds-fhvzm             1/1     Running   0          50m
kube-system   kube-proxy-pcs8n                  1/1     Running   0          3h8m
kube-system   kube-scheduler-node212            1/1     Running   0          3h8m

Test an nginx image

kubectl run test --image=harbor.sh.deepin.com/sunway/nginx:1.14.1
kubectl expose deployment test --port=80 --target-port=80
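
To confirm nginx is actually serving traffic, look up the ClusterIP created by the expose command and curl it from the node (the service name "test" comes from the deployment above; kubectl run in 1.13 labels the pods with run=test):

kubectl get pods -l run=test -o wide
kubectl get svc test
curl -I http://$(kubectl get svc test -o jsonpath='{.spec.clusterIP}')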

