Installing Kubernetes v1.20.9 with containerd

Install the containerd runtime

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

List the available containerd.io versions:
yum search containerd.io --showduplicates

Install the latest containerd.io:
yum install -y containerd.io

Generate the default containerd configuration file:
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

Edit config.toml: set sandbox_image to a reachable mirror, add a registry mirror endpoint for docker.io, and enable the systemd cgroup driver. With the default runc v2 runtime the correct key is SystemdCgroup = true in the runc options table:
vim /etc/containerd/config.toml

[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.5"

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
        SystemdCgroup = true

    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://registry.cn-hangzhou.aliyuncs.com"]

Alternatively, apply the same changes to config.toml with sed:
sed -i "s#k8s.gcr.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml
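
A quick grep confirms all three changes landed (the patterns match the values set above):

grep -E "sandbox_image|SystemdCgroup|registry.cn-hangzhou" /etc/containerd/config.toml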


Enable and restart containerd:
systemctl daemon-reload
systemctl enable containerd
systemctl restart containerd
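
Optionally point crictl at the containerd socket to inspect the runtime; crictl is provided by the cri-tools package that the kubeadm install below pulls in:

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
crictl info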

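Install kubeadm, kubelet, and kubectl pinned to v1.20.9 before enabling kubelet. A minimal sketch using the Aliyun Kubernetes yum mirror, consistent with the mirrors used above (gpgcheck disabled for brevity):

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9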

Enable and restart kubelet (it will keep restarting until kubeadm init generates its config; this is expected):
systemctl enable kubelet
systemctl restart kubelet


Add the cluster nodes to /etc/hosts on every machine:

cat >> /etc/hosts <<EOF
192.168.1.221 master221
192.168.1.222 worker222
192.168.1.223 worker223
192.168.1.224 worker224
EOF
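
kubeadm also expects the usual node prerequisites, which the commands above don't cover; a sketch covering swap, the bridge sysctls, and the ip_vs modules needed for the ipvs kube-proxy mode configured below:

swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

for m in br_netfilter ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh; do modprobe $m; done

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system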

Set the kubelet cgroup driver to systemd, matching the cgroupDriver in the KubeletConfiguration below:

echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/sysconfig/kubelet

cat kubeadm-init.yaml

# kubeadm config print init-defaults
# https://www.guojingyi.cn/912.html
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.191
  bindPort: 6443
nodeRegistration:
  # criSocket: /var/run/dockershim.sock
  criSocket: /run/containerd/containerd.sock
  name: master191
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  # extraArgs:
  #   authorization-mode: Node,RBAC
  certSANs:
  - "192.168.1.191"
  - "127.0.0.1"
  - localhost
  - master191
  - vpn.idx.ee
  - kubernetes
  - kubernetes.default
  - kubernetes.default.svc
  - kubernetes.default.svc.cluster
  - kubernetes.default.svc.cluster.local
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
controlPlaneEndpoint: "192.168.1.191:6443"
kind: ClusterConfiguration
kubernetesVersion: v1.20.9
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  strictARP: true
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
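
With the config saved, initialize the control plane and set up kubectl access. A typical sequence; kubeadm init prints the exact join command (token and hash elided here):

kubeadm init --config kubeadm-init.yaml

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

# on each worker, run the join command printed by kubeadm init, e.g.:
# kubeadm join 192.168.1.191:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>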

Deploy metrics-server; the manifest below follows the official v0.5.0 release, with the image mirrored to Aliyun and --kubelet-insecure-tls set for the kubelets' self-signed certificates:

cat metrics.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: registry.cn-shenzhen.aliyuncs.com/stonek8s/metrics-server:v0.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
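
Apply the manifest and verify that metrics come in (the metrics API can take a minute after the pod turns ready):

kubectl apply -f metrics.yaml
kubectl top nodes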

Install a pod network add-on; choose one. The podSubnet above (10.244.0.0/16) matches Flannel's default.

Flannel:
https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

Calico:
https://docs.projectcalico.org/manifests/calico.yaml

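Either manifest applies directly with kubectl, for example:

kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml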

Values for the ingress-nginx Helm chart, with the controller image mirrored to Aliyun:

cat ingress-nginx-values.yaml

controller:
  admissionWebhooks:
    annotations: {}
    enabled: false
    failurePolicy: Fail
  image:
    digest: sha256:2b29d459bb978bc773fcfc824a837bb52d6223aba342411d22d61522e58f811b
    repository: registry.cn-hangzhou.aliyuncs.com/mypaas/ingress-nginx-controller
    tag: v0.43.0
  ingressClass: nginx
  kind: Deployment
  metrics:
    enabled: true
    prometheusRule:
      additionalLabels:
        release: kube-prometheus-stack
      enabled: false
      namespace: monitoring
      rules: []
    service:
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    serviceMonitor:
      additionalLabels:
        release: kube-prometheus-stack
      enabled: true
      namespace: monitoring
      scrapeInterval: 30s
  minAvailable: 1
  name: controller
  publishService:
    enabled: true
  replicaCount: 2
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 200m
      memory: 256Mi
  service:
    annotations: null
    enabled: true
    externalTrafficPolicy: Cluster
    type: LoadBalancer
  stats:
    enabled: true
defaultBackend:
  enabled: false
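
Install the chart with these values. A sketch assuming the official chart repo; you may need to pin an older chart release whose appVersion matches controller v0.43.0, and serviceMonitor.enabled: true requires the Prometheus Operator CRDs to already exist:

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm install ingress-nginx ingress-nginx/ingress-nginx -f ingress-nginx-values.yaml -n ingress-nginx --create-namespace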



Install MetalLB so LoadBalancer Services (such as the ingress controller above) get an address. Save the values below as, say, metallb-values.yaml:

helm repo add bitnami https://charts.bitnami.com/bitnami
helm install metallb -f metallb-values.yaml bitnami/metallb
configInline:
  address-pools:
  - name: default
    protocol: layer2
    addresses:
    - 192.168.1.195-192.168.1.198
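
Once MetalLB is up, LoadBalancer Services should receive an address from the pool; the ingress controller Service is a good check (release and namespace names as assumed above):

kubectl get svc -n ingress-nginx ingress-nginx-controller
# EXTERNAL-IP should show one of 192.168.1.195-192.168.1.198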

