Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-04-09 07:58:23

#!/bin/bash
#
# Setup notes/script for a kubeadm-based Kubernetes cluster on CentOS.
# Run as root.  Sections: kubectl install, kernel prerequisites, docker-ce,
# kubeadm/kubelet install, control-plane init, worker join, firewall rules.

# Work area for downloaded artifacts.
mkdir -p /opt/k8s            # -p: do not fail if the directory already exists
cd /opt/k8s || exit 1        # abort rather than download into the wrong dir

# Fetch the latest stable kubectl release for linux/amd64.
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

# install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
install -o root -g root -m 0755 kubectl /usr/bin/kubectl
kubectl version --client

# kubectl config ~/.kube/config

# iptables bridged traffic prerequisites (kubeadm preflight requirements)
cat /sys/class/dmi/id/product_uuid   # must be unique on every node
lsmod | grep br_netfilter

# Load br_netfilter on every boot ...
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
# ... and load it now: the modules-load.d entry only takes effect at boot,
# and the bridge sysctl keys below do not exist until the module is loaded.
modprobe br_netfilter

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system

#yum install telnet
0030 
# install docker-ce
# Remove distro docker packages that would conflict with upstream docker-ce.
yum remove -y docker \
              docker-client \
              docker-client-latest \
              docker-common \
              docker-latest \
              docker-latest-logrotate \
              docker-logrotate \
              docker-engine
# Add the upstream docker-ce repo; use only the stable channel.
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
yum-config-manager --disable docker-ce-nightly

yum-config-manager --setopt "docker-ce-stable.priority=1" --enable docker-ce-stable
yum install -y docker-ce docker-ce-cli containerd.io   # -y: consistent with the kubelet install below

systemctl start docker

# smoke test
docker run hello-world

# docker post installation: allow non-root users to talk to the daemon
groupadd docker
usermod -aG docker "$USER"
usermod -aG docker wguan

# logout and login to activate the new group docker for the account
# (NOTE: newgrp starts a new shell; inside a non-interactive script it does
# not affect the commands that follow)
newgrp docker

# change the docker cgroup driver from cgroupfs to systemd so it matches the
# kubelet (kubeadm defaults to the systemd driver).  This automates the
# former manual edit of /usr/lib/systemd/system/docker.service, which
# appended '--exec-opt native.cgroupdriver=systemd' to the ExecStart line:
sed -i 's|^\(ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock\)$|\1 --exec-opt native.cgroupdriver=systemd|' \
    /usr/lib/systemd/system/docker.service

systemctl daemon-reload
systemctl restart docker
docker info   # verify "Cgroup Driver: systemd" in the output

# start docker on boot -- kubeadm preflight warns when docker is not enabled
# ("[WARNING Service-Docker]: docker service is not enabled ...")
systemctl enable docker.service
systemctl enable containerd.service
# install kubernetes (kubelet / kubeadm / kubectl)
# NOTE(review): the packages.cloud.google.com yum repos are legacy/frozen;
# new installs should point at the community repos on pkgs.k8s.io -- left
# unchanged here to preserve the original behavior.
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

# Keep the puppet-managed yum configuration in sync with the same repo
# definition (previously duplicated verbatim as a second here-doc).
sudo cp /etc/yum.repos.d/kubernetes.repo /etc/yum-puppet.repos.d/kubernetes.repo

# kubelet does not support SELinux enforcing mode
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

# The repo excludes kubelet/kubeadm/kubectl so a routine 'yum update' cannot
# move them to an incompatible version; override explicitly for the install.
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable --now kubelet
0103 
# to clean previous installation
# kubeadm reset

# Initialize the control plane (run on the master node only).
kubeadm init
# --- sample 'kubeadm init' output from aipanda162, kept for reference ---
#     [init] Using Kubernetes version: v1.23.5
#     [preflight] Running pre-flight checks
#         [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
#     [preflight] Pulling images required for setting up a Kubernetes cluster
#     [preflight] This might take a minute or two, depending on the speed of your internet connection
#     [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
#     [certs] Using certificateDir folder "/etc/kubernetes/pki"
#     [certs] Generating "ca" certificate and key
#     [certs] Generating "apiserver" certificate and key
#     [certs] apiserver serving cert is signed for DNS names [aipanda162.cern.ch kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 188.184.109.78]
#     [certs] Generating "apiserver-kubelet-client" certificate and key
#     [certs] Generating "front-proxy-ca" certificate and key
#     [certs] Generating "front-proxy-client" certificate and key
#     [certs] Generating "etcd/ca" certificate and key
#     [certs] Generating "etcd/server" certificate and key
#     [certs] etcd/server serving cert is signed for DNS names [aipanda162.cern.ch localhost] and IPs [188.184.109.78 127.0.0.1 ::1]
#     [certs] Generating "etcd/peer" certificate and key
#     [certs] etcd/peer serving cert is signed for DNS names [aipanda162.cern.ch localhost] and IPs [188.184.109.78 127.0.0.1 ::1]
#     [certs] Generating "etcd/healthcheck-client" certificate and key
#     [certs] Generating "apiserver-etcd-client" certificate and key
#     [certs] Generating "sa" key and public key
#     [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
#     [kubeconfig] Writing "admin.conf" kubeconfig file
#     [kubeconfig] Writing "kubelet.conf" kubeconfig file
#     [kubeconfig] Writing "controller-manager.conf" kubeconfig file
#     [kubeconfig] Writing "scheduler.conf" kubeconfig file
#     [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
#     [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
#     [kubelet-start] Starting the kubelet
#     [control-plane] Using manifest folder "/etc/kubernetes/manifests"
#     [control-plane] Creating static Pod manifest for "kube-apiserver"
#     [control-plane] Creating static Pod manifest for "kube-controller-manager"
#     [control-plane] Creating static Pod manifest for "kube-scheduler"
#     [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
#     [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
#     [apiclient] All control plane components are healthy after 8.004179 seconds
#     [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
#     [kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
#     NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
#     [upload-certs] Skipping phase. Please see --upload-certs
#     [mark-control-plane] Marking the node aipanda162.cern.ch as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
#     [mark-control-plane] Marking the node aipanda162.cern.ch as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
#     [bootstrap-token] Using token: 8yyxeo.hfw5ovunckalxvp7
#     [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
#     [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
#     [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
#     [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
#     [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
#     [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
#     [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
#     [addons] Applied essential addon: CoreDNS
#     [addons] Applied essential addon: kube-proxy
#
#     Your Kubernetes control-plane has initialized successfully!
#
#     To start using your cluster, you need to run the following as a regular user:
#
#       mkdir -p $HOME/.kube
#       sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#       sudo chown $(id -u):$(id -g) $HOME/.kube/config
#
#     Alternatively, if you are the root user, you can run:
#
#       export KUBECONFIG=/etc/kubernetes/admin.conf
#
#     You should now deploy a pod network to the cluster.
#     Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
#       https://kubernetes.io/docs/concepts/cluster-administration/addons/
#
#     Then you can join any number of worker nodes by running the following on each as root:
#
#     kubeadm join 188.184.109.78:6443 --token 8yyxeo.hfw5ovunckalxvp7 \
#         --discovery-token-ca-cert-hash sha256:fa6675768a87db2ab0736d09b7bdcaaa9d1147dff13d67a4b3f62939dd46a486
# --- end of sample output ---

# create pod network (Calico CNI)
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl create -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
0185 
# on other nodes, join the master.  The token/hash come from the
# 'kubeadm init' output on the control-plane; if the token has expired,
# regenerate with: kubeadm token create --print-join-command
kubeadm join 188.184.109.78:6443 --token 8yyxeo.hfw5ovunckalxvp7 \
        --discovery-token-ca-cert-hash sha256:fa6675768a87db2ab0736d09b7bdcaaa9d1147dff13d67a4b3f62939dd46a486
# --- sample 'kubeadm join' output, kept for reference ---
#     [preflight] Running pre-flight checks
#         [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
#     [preflight] Reading configuration from the cluster...
#     [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
#     [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
#     [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
#     [kubelet-start] Starting the kubelet
#     [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
#
#     This node has joined the cluster:
#     * Certificate signing request was sent to apiserver and a response was received.
#     * The Kubelet was informed of the new secure connection details.
#
#     Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# --- end of sample output ---

# on aipanda162, accept all connections from aipanda161
# NOTE(review): 188.184.109.78 is aipanda162's own address (it is the
# apiserver IP in the cert SANs above), so these two rule pairs look swapped
# relative to their comments -- confirm which IP belongs to which host.
iptables -I INPUT -p tcp -s 188.184.109.78 -j ACCEPT
iptables -I OUTPUT -p tcp -d 188.184.109.78 -j ACCEPT
# on aipanda161, accept all connections from aipanda162
iptables -I INPUT -p tcp -s 188.184.28.249 -j ACCEPT
iptables -I OUTPUT -p tcp -d 188.184.28.249 -j ACCEPT
0211