{"id":5494,"date":"2024-02-10T10:29:23","date_gmt":"2024-02-10T09:29:23","guid":{"rendered":"http:\/\/miro.borodziuk.eu\/?p=5494"},"modified":"2025-05-05T19:13:54","modified_gmt":"2025-05-05T17:13:54","slug":"kubernetes-excercises","status":"publish","type":"post","link":"http:\/\/miro.borodziuk.eu\/index.php\/2024\/02\/10\/kubernetes-excercises\/","title":{"rendered":"Kubernetes Excercises"},"content":{"rendered":"<p><span style=\"color: #3366ff;\"><!--more--><\/span><\/p>\n<p><span style=\"color: #3366ff;\">Creating a Kubernetes Cluster<\/span><\/p>\n<ul>\n<li>Create a 3-node Kubernetes cluster, using one control plane node and 2<br \/>\nworker nodes.<\/li>\n<\/ul>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Scheduling a Pod<\/span><\/p>\n<ul>\n<li>Schedule a Pod with the name lab123 that runs the Nginx and redis<br \/>\napplications.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl run lab123 --image=nginx --dry-run=client -o yaml &gt; lab123.yaml\r\n[root@controller ~]# cat lab123.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab123\r\n  name: lab123\r\nspec:\r\n  containers:\r\n  - image: nginx\r\n    name: lab123\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# cat lab123.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab123\r\n  name: lab123\r\nspec:\r\n  containers:\r\n  - image: nginx\r\n    name: lab123\r\n    resources: {}\r\n  - image: redis\r\n    name: redis\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl apply -f lab123.yaml\r\npod\/lab123 created\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME     READY   STATUS    RESTARTS   AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab123   2\/2     Running   0          6s    192.168.0.155   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Managing Application Initialization<\/span><\/p>\n<ul>\n<li>Create a deployment with the name lab124deploy which runs the Nginx image,<br \/>\nbut waits 30 seconds before starting the actual Pods.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl create deploy lab124deploy --image=busybox --dry-run=client -o yaml -- sleep 30 &gt; lab124deploy.yaml\r\n[root@controller ~]# cat lab124deploy.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    app: lab124deploy\r\n  name: lab124deploy\r\nspec:\r\n  replicas: 1\r\n  selector:\r\n    matchLabels:\r\n      app: lab124deploy\r\n  strategy: {}\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: lab124deploy\r\n    spec:\r\n      containers:\r\n      - command:\r\n        - sleep\r\n        - \"30\"\r\n        image: busybox\r\n        name: busybox\r\n        resources: {}\r\nstatus: {}\r\n<\/pre>\n<p>Go to the kubernetes documentation page -&gt; search: init container -&gt; copy init container in use:<\/p>\n<pre class=\"lang:default mark:9-12 decode:true\">apiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: myapp-pod\r\n  labels:\r\n    app.kubernetes.io\/name: MyApp\r\nspec:\r\n  containers:\r\n  - name: myapp-container\r\n    image: busybox:1.28\r\n    command: ['sh', '-c', 'echo The app is running! 
&amp;&amp; sleep 3600']\r\n  initContainers:\r\n  - name: init-myservice\r\n    image: busybox:1.28\r\n    command: ['sh', '-c', \"until nslookup myservice.$(cat \/var\/run\/secrets\/kubernetes.io\/serviceaccount\/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done\"]\r\n  - name: init-mydb\r\n    image: busybox:1.28\r\n    command: ['sh', '-c', \"until nslookup mydb.$(cat \/var\/run\/secrets\/kubernetes.io\/serviceaccount\/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done\"]<\/pre>\n<p>Modify <code>lab124deploy.yaml<\/code> so it looks like:<\/p>\n<pre class=\"lang:default mark:22-24 decode:true\">[root@controller ~]# cat lab124deploy.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    app: lab124deploy\r\n  name: lab124deploy\r\nspec:\r\n  replicas: 1\r\n  selector:\r\n    matchLabels:\r\n      app: lab124deploy\r\n  strategy: {}\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: lab124deploy\r\n    spec:\r\n      containers:\r\n      - name: nginx\r\n        image: nginx\r\n      initContainers:\r\n      - command:\r\n        - sleep\r\n        - \"30\"\r\n        image: busybox\r\n        name: busybox\r\n        resources: {}\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl apply -f lab124deploy.yaml\r\ndeployment.apps\/lab124deploy created\r\n[root@controller ~]# kubectl get pods\r\nNAME                            READY   STATUS     RESTARTS   AGE\r\nlab123                          2\/2     Running    0          18m\r\nlab124deploy-7c7c8457f9-lclk4   0\/1     Init:0\/1   0          7s\r\n\r\n[root@controller ~]# kubectl get pods\r\nNAME                            READY   STATUS            RESTARTS   AGE\r\nlab123                          2\/2     Running           0          19m\r\nlab124deploy-7c7c8457f9-lclk4   0\/1     PodInitializing   0          35s\r\n\r\n[root@controller ~]# kubectl get pods\r\nNAME                            READY   STATUS    RESTARTS   AGE\r\nlab123                          2\/2     Running   0          19m\r\nlab124deploy-7c7c8457f9-lclk4   1\/1     Running   0          42s\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Setting up Persistent Storage<\/span><\/p>\n<p>Create a Persistent Volume with the name lab125 that uses HostPath on the<br \/>\ndirectory \/lab125.<\/p>\n<p>Go to Kubernetes documentation:\u00a0 <em>persistent volume -&gt; Configure a Pod to Use a PersistentVolume for Storage -&gt; Create a PersistentVolume<\/em><\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# vi lab125.yaml\r\n\r\n[root@controller ~]# cat lab125.yaml\r\napiVersion: v1\r\nkind: PersistentVolume\r\nmetadata:\r\n  name: lab125\r\n  labels:\r\n    type: local\r\nspec:\r\n  storageClassName: manual\r\n  capacity:\r\n    storage: 10Gi\r\n  accessModes:\r\n    - ReadWriteOnce\r\n  hostPath:\r\n    path: \"\/lab125\"\r\n\r\n[root@controller ~]# kubectl apply -f lab125.yaml\r\npersistentvolume\/lab125 created\r\n\r\n[root@controller ~]# kubectl get pv\r\nNAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE\r\nlab125   10Gi       RWO            Retain           Available           manual                  9s\r\n\r\n[root@controller ~]# kubectl describe pv lab125\r\nName:            lab125\r\nLabels:          type=local\r\nAnnotations:     &lt;none&gt;\r\nFinalizers:      [kubernetes.io\/pv-protection]\r\nStorageClass:    manual\r\nStatus:          
Available\r\nClaim:\r\nReclaim Policy:  Retain\r\nAccess Modes:    RWO\r\nVolumeMode:      Filesystem\r\nCapacity:        10Gi\r\nNode Affinity:   &lt;none&gt;\r\nMessage:\r\nSource:\r\n    Type:          HostPath (bare host directory volume)\r\n    Path:          \/lab125\r\n    HostPathType:\r\nEvents:            &lt;none&gt;\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Configuring Application Access<\/span><\/p>\n<ul>\n<li>Create a Deployment with the name lab126deploy, running 3 instances of<br \/>\nthe Nginx image.<\/li>\n<li>Configure it such that it can be accessed by external users on port 32567 on each cluster node.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create deployment lab126deploy --image=nginx --replicas=3\r\ndeployment.apps\/lab126deploy created\r\n\r\n[root@controller ~]# kubectl expose deployment lab126deploy --port=80\r\nservice\/lab126deploy exposed\r\n\r\n[root@controller ~]# kubectl get all --selector  app=lab126deploy\r\nNAME                               READY   STATUS    RESTARTS   AGE\r\npod\/lab126deploy-fff46cd4b-4drk6   1\/1     Running   0          51s\r\npod\/lab126deploy-fff46cd4b-lhmfs   1\/1     Running   0          51s\r\npod\/lab126deploy-fff46cd4b-zw5fq   1\/1     Running   0          51s\r\n\r\nNAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE\r\nservice\/lab126deploy   ClusterIP   10.105.103.37   &lt;none&gt;        80\/TCP    34s\r\n\r\nNAME                           READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/lab126deploy   3\/3     3            3           51s\r\n\r\nNAME                                     DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/lab126deploy-fff46cd4b   3         3         3       51s\r\n\r\n[root@controller ~]# kubectl explain service.spec.ports\r\n...\r\n  nodePort      &lt;integer&gt;\r\n    The port on each node on which this service is exposed when type is NodePort\r\n    or LoadBalancer.  Usually assigned by the system. If a value is specified,\r\n    in-range, and not in use it will be used, otherwise the operation will fail.\r\n    If not specified, a port will be allocated if this Service requires one.  If\r\n    this field is specified when creating a Service which does not need it,\r\n    creation will fail. This field will be wiped when updating a Service to no\r\n    longer need it (e.g. changing type from NodePort to ClusterIP). 
More info:\r\n    https:\/\/kubernetes.io\/docs\/concepts\/services-networking\/service\/#type-nodeport\r\n ...\r\n\r\n[root@controller ~]# kubectl edit svc lab126deploy\r\n<\/pre>\n<p>Edit the svc:<\/p>\n<pre class=\"lang:default decode:true \">apiVersion: v1\r\nkind: Service\r\nmetadata:\r\n  creationTimestamp: \"2024-02-18T12:07:17Z\"\r\n  labels:\r\n    app: lab126deploy\r\n  name: lab126deploy\r\n  namespace: default\r\n  resourceVersion: \"495475\"\r\n  uid: 591535a4-24ba-406c-8f37-cb0d2e594ba3\r\nspec:\r\n  clusterIP: 10.105.103.37\r\n  clusterIPs:\r\n  - 10.105.103.37\r\n  internalTrafficPolicy: Cluster\r\n  ipFamilies:\r\n  - IPv4\r\n  ipFamilyPolicy: SingleStack\r\n  ports:\r\n  - port: 80\r\n    protocol: TCP\r\n    targetPort: 80\r\n  selector:\r\n    app: lab126deploy\r\n  sessionAffinity: None\r\n  type: ClusterIP\r\nstatus:\r\n  loadBalancer: {}\r\n<\/pre>\n<p>After svc has been edited:<\/p>\n<pre class=\"lang:default mark:23,27 decode:true \">apiVersion: v1\r\nkind: Service\r\nmetadata:\r\n  creationTimestamp: \"2024-02-18T12:07:17Z\"\r\n  labels:\r\n    app: lab126deploy\r\n  name: lab126deploy\r\n  namespace: default\r\n  resourceVersion: \"495475\"\r\n  uid: 591535a4-24ba-406c-8f37-cb0d2e594ba3\r\nspec:\r\n  clusterIP: 10.105.103.37\r\n  clusterIPs:\r\n  - 10.105.103.37\r\n  internalTrafficPolicy: Cluster\r\n  ipFamilies:\r\n  - IPv4\r\n  ipFamilyPolicy: SingleStack\r\n  ports:\r\n  - port: 80\r\n    protocol: TCP\r\n    targetPort: 80\r\n    nodePort: 32567\r\n  selector:\r\n    app: lab126deploy\r\n  sessionAffinity: None\r\n  type: NodePort\r\nstatus:\r\n  loadBalancer: {}\r\n<\/pre>\n<p>Let&#8217;s check:<\/p>\n<pre class=\"lang:default mark:8 decode:true\">[root@controller ~]# kubectl get all --selector  app=lab126deploy\r\nNAME                               READY   STATUS    RESTARTS   AGE\r\npod\/lab126deploy-fff46cd4b-4drk6   1\/1     Running   0          75m\r\npod\/lab126deploy-fff46cd4b-lhmfs   1\/1     Running   0          75m\r\npod\/lab126deploy-fff46cd4b-zw5fq   1\/1     Running   0          75m\r\n\r\nNAME                   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE\r\nservice\/lab126deploy   NodePort   10.105.103.37   &lt;none&gt;        80:32567\/TCP   75m\r\n\r\nNAME                           READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/lab126deploy   3\/3     3            3           75m\r\n\r\nNAME                                     DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/lab126deploy-fff46cd4b   3         3         3       75m\r\n\r\n[root@controller ~]# kubectl get svc\r\nNAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE\r\nkubernetes     ClusterIP   10.96.0.1       &lt;none&gt;        443\/TCP        2d21h\r\nlab126deploy   NodePort    10.105.103.37   &lt;none&gt;        80:32567\/TCP   75m\r\n\r\n[root@controller ~]# kubectl describe svc lab126deploy\r\nName:                     lab126deploy\r\nNamespace:                default\r\nLabels:                   app=lab126deploy\r\nAnnotations:              &lt;none&gt;\r\nSelector:                 app=lab126deploy\r\nType:                     NodePort\r\nIP Family Policy:         SingleStack\r\nIP Families:              IPv4\r\nIP:                       10.105.103.37\r\nIPs:                      10.105.103.37\r\nPort:                     &lt;unset&gt;  80\/TCP\r\nTargetPort:               80\/TCP\r\nNodePort:                 &lt;unset&gt;  32567\/TCP\r\nEndpoints:                
192.168.0.157:80,192.168.0.158:80,192.168.0.159:80\r\nSession Affinity:         None\r\nExternal Traffic Policy:  Cluster\r\nEvents:                   &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME                            READY   STATUS    RESTARTS   AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab123                          2\/2     Running   0          23h   192.168.0.155   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab124deploy-7c7c8457f9-lclk4   1\/1     Running   0          23h   192.168.0.156   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-4drk6    1\/1     Running   0          88m   192.168.0.157   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-lhmfs    1\/1     Running   0          88m   192.168.0.159   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-zw5fq    1\/1     Running   0          88m   192.168.0.158   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# curl worker1.example.com:32567\r\n&lt;!DOCTYPE html&gt;\r\n&lt;html&gt;\r\n&lt;head&gt;\r\n&lt;title&gt;Welcome to nginx!&lt;\/title&gt;\r\n&lt;style&gt;\r\nhtml { color-scheme: light dark; }\r\nbody { width: 35em; margin: 0 auto;\r\nfont-family: Tahoma, Verdana, Arial, sans-serif; }\r\n&lt;\/style&gt;\r\n&lt;\/head&gt;\r\n&lt;body&gt;\r\n&lt;h1&gt;Welcome to nginx!&lt;\/h1&gt;\r\n&lt;p&gt;If you see this page, the nginx web server is successfully installed and\r\nworking. Further configuration is required.&lt;\/p&gt;\r\n\r\n&lt;p&gt;For online documentation and support please refer to\r\n&lt;a href=\"http:\/\/nginx.org\/\"&gt;nginx.org&lt;\/a&gt;.&lt;br\/&gt;\r\nCommercial support is available at\r\n&lt;a href=\"http:\/\/nginx.com\/\"&gt;nginx.com&lt;\/a&gt;.&lt;\/p&gt;\r\n\r\n&lt;p&gt;&lt;em&gt;Thank you for using nginx.&lt;\/em&gt;&lt;\/p&gt;\r\n&lt;\/body&gt;\r\n&lt;\/html&gt;\r\n\r\n\r\n[root@controller ~]# curl worker2.example.com:32567\r\n&lt;!DOCTYPE html&gt;\r\n&lt;html&gt;\r\n&lt;head&gt;\r\n&lt;title&gt;Welcome to nginx!&lt;\/title&gt;\r\n&lt;style&gt;\r\nhtml { color-scheme: light dark; }\r\nbody { width: 35em; margin: 0 auto;\r\nfont-family: Tahoma, Verdana, Arial, sans-serif; }\r\n&lt;\/style&gt;\r\n&lt;\/head&gt;\r\n&lt;body&gt;\r\n&lt;h1&gt;Welcome to nginx!&lt;\/h1&gt;\r\n&lt;p&gt;If you see this page, the nginx web server is successfully installed and\r\nworking. 
Further configuration is required.&lt;\/p&gt;\r\n\r\n&lt;p&gt;For online documentation and support please refer to\r\n&lt;a href=\"http:\/\/nginx.org\/\"&gt;nginx.org&lt;\/a&gt;.&lt;br\/&gt;\r\nCommercial support is available at\r\n&lt;a href=\"http:\/\/nginx.com\/\"&gt;nginx.com&lt;\/a&gt;.&lt;\/p&gt;\r\n\r\n&lt;p&gt;&lt;em&gt;Thank you for using nginx.&lt;\/em&gt;&lt;\/p&gt;\r\n&lt;\/body&gt;\r\n&lt;\/html&gt;\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Securing Network Traffic<\/span><\/p>\n<p>Create a Namespace with the name restricted, and configure it such that it<br \/>\nonly allows access to Pods exposing port 80 for Pods coming from the<br \/>\nNamespaces access.<\/p>\n<p>Go to the kubernetes documentation page -&gt;<em> search: network policy -&gt; Network Policies -&gt; The NetworkPolicy resource<\/em><\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl create ns restricted\r\nnamespace\/restricted created\r\n\r\n[root@controller ~]# kubectl create ns access\r\nnamespace\/access created\r\n\r\n[root@controller ~]# kubectl run testnginx --image=nginx -n restricted\r\npod\/testnginx created\r\n\r\n[root@controller ~]# kubectl run testbox --image=busybox -n access -- sleep 3600\r\npod\/testbox created\r\n\r\n[root@controller ~]# kubectl run testbox --image=busybox -- sleep 3600\r\npod\/testbox created\r\n\r\n[root@controller ~]# vi lab127.yaml\r\n[root@controller ~]# cat lab127.yaml\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: default\r\nspec:\r\n  podSelector:\r\n    matchLabels:\r\n      role: db\r\n  policyTypes:\r\n  - Ingress\r\n  - Egress\r\n  ingress:\r\n  - from:\r\n    - ipBlock:\r\n        cidr: 172.17.0.0\/16\r\n        except:\r\n        - 172.17.1.0\/24\r\n    - namespaceSelector:\r\n        matchLabels:\r\n          project: myproject\r\n    - podSelector:\r\n        matchLabels:\r\n          role: frontend\r\n    ports:\r\n    - protocol: TCP\r\n      port: 6379\r\n  egress:\r\n  - to:\r\n    - ipBlock:\r\n        cidr: 10.0.0.0\/24\r\n    ports:\r\n    - protocol: TCP\r\n      port: 5978\r\n<\/pre>\n<p>Let&#8217;s edit the 127lab.yaml.<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# cat lab127.yaml\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: restricted\r\nspec:\r\n  policyTypes:\r\n  - Ingress\r\n  ingress:\r\n  - from:\r\n    - namespaceSelector:\r\n        matchLabels:\r\n          project: myproject\r\n    ports:\r\n    - protocol: TCP\r\n      port: 80\r\n\r\n[root@controller ~]# kubectl label ns access project=myproject\r\nnamespace\/access labeled\r\n\r\n[root@controller ~]# kubectl get ns --show-labels\r\nNAME               STATUS   AGE    LABELS\r\naccess             Active   154m   kubernetes.io\/metadata.name=access,project=myproject\r\ncalico-apiserver   Active   3d     kubernetes.io\/metadata.name=calico-apiserver,name=calico-apiserver,pod-security.kubernetes.io\/enforce-version=latest,pod-security.kubernetes.io\/enforce=privileged\r\ncalico-system      Active   3d     kubernetes.io\/metadata.name=calico-system,name=calico-system,pod-security.kubernetes.io\/enforce-version=latest,pod-security.kubernetes.io\/enforce=privileged\r\ndefault            Active   3d     kubernetes.io\/metadata.name=default\r\nkube-node-lease    Active   3d     kubernetes.io\/metadata.name=kube-node-lease\r\nkube-public        Active   3d     
kubernetes.io\/metadata.name=kube-public\r\nkube-system        Active   3d     kubernetes.io\/metadata.name=kube-system\r\nrestricted         Active   155m   kubernetes.io\/metadata.name=restricted\r\ntigera-operator    Active   3d     kubernetes.io\/metadata.name=tigera-operator,name=tigera-operator,pod-security.kubernetes.io\/enforce=privileged\r\n\r\n[root@controller ~]# kubectl create -f lab127.yaml\r\nnetworkpolicy.networking.k8s.io\/test-network-policy created\r\n\r\n[root@controller ~]# kubectl describe &lt;TAB&gt;\r\napiservers.operator.tigera.io                                 events.events.k8s.io                                          networkpolicies.crd.projectcalico.org\r\napiservices.apiregistration.k8s.io                            felixconfigurations.crd.projectcalico.org                     networkpolicies.networking.k8s.io\r\nbgpconfigurations.crd.projectcalico.org                       felixconfigurations.projectcalico.org                         networkpolicies.projectcalico.org\r\nbgpconfigurations.projectcalico.org                           flowschemas.flowcontrol.apiserver.k8s.io                      networksets.crd.projectcalico.org\r\nbgpfilters.crd.projectcalico.org                              globalnetworkpolicies.crd.projectcalico.org                   networksets.projectcalico.org\r\nbgpfilters.projectcalico.org                                  globalnetworkpolicies.projectcalico.org                       nodes\r\nbgppeers.crd.projectcalico.org                                globalnetworksets.crd.projectcalico.org                       persistentvolumeclaims\r\nbgppeers.projectcalico.org                                    globalnetworksets.projectcalico.org                           persistentvolumes\r\nblockaffinities.crd.projectcalico.org                         horizontalpodautoscalers.autoscaling                          poddisruptionbudgets.policy\r\nblockaffinities.projectcalico.org                             hostendpoints.crd.projectcalico.org                           pods\r\ncaliconodestatuses.crd.projectcalico.org                      hostendpoints.projectcalico.org                               podtemplates\r\ncaliconodestatuses.projectcalico.org                          imagesets.operator.tigera.io                                  priorityclasses.scheduling.k8s.io\r\ncertificatesigningrequests.certificates.k8s.io                ingressclasses.networking.k8s.io                              prioritylevelconfigurations.flowcontrol.apiserver.k8s.io\r\nclusterinformations.crd.projectcalico.org                     ingresses.networking.k8s.io                                   profiles.projectcalico.org\r\nclusterinformations.projectcalico.org                         installations.operator.tigera.io                              replicasets.apps\r\nclusterrolebindings.rbac.authorization.k8s.io                 ipamblocks.crd.projectcalico.org                              replicationcontrollers\r\nclusterroles.rbac.authorization.k8s.io                        ipamconfigs.crd.projectcalico.org                             resourcequotas\r\ncomponentstatuses                                             ipamconfigurations.projectcalico.org                          rolebindings.rbac.authorization.k8s.io\r\nconfigmaps                                                    ipamhandles.crd.projectcalico.org                             roles.rbac.authorization.k8s.io\r\ncontrollerrevisions.apps                                      ippools.crd.projectcalico.org             
                    runtimeclasses.node.k8s.io\r\ncronjobs.batch                                                ippools.projectcalico.org                                     secrets\r\ncsidrivers.storage.k8s.io                                     ipreservations.crd.projectcalico.org                          serviceaccounts\r\ncsinodes.storage.k8s.io                                       ipreservations.projectcalico.org                              services\r\ncsistoragecapacities.storage.k8s.io                           jobs.batch                                                    statefulsets.apps\r\ncustomresourcedefinitions.apiextensions.k8s.io                kubecontrollersconfigurations.crd.projectcalico.org           storageclasses.storage.k8s.io\r\ndaemonsets.apps                                               kubecontrollersconfigurations.projectcalico.org               tigerastatuses.operator.tigera.io\r\ndeployments.apps                                              leases.coordination.k8s.io                                    validatingwebhookconfigurations.admissionregistration.k8s.io\r\nendpoints                                                     limitranges                                                   volumeattachments.storage.k8s.io\r\nendpointslices.discovery.k8s.io                               mutatingwebhookconfigurations.admissionregistration.k8s.io\r\nevents                                                        namespaces\r\n\r\n[root@controller ~]# kubectl describe networkpolicies.networking.k8s.io -n restricted\r\nName:         test-network-policy\r\nNamespace:    restricted\r\nCreated on:   2024-02-18 11:31:10 -0500 EST\r\nLabels:       &lt;none&gt;\r\nAnnotations:  &lt;none&gt;\r\nSpec:\r\n  PodSelector:     &lt;none&gt; (Allowing the specific traffic to all pods in this namespace)\r\n  Allowing ingress traffic:\r\n    To Port: 80\/TCP\r\n    From:\r\n      NamespaceSelector: project=myproject\r\n  Not affecting egress traffic\r\n  Policy Types: Ingress\r\n\r\n[root@controller ~]# kubectl get pods -n restricted\r\nNAME        READY   STATUS    RESTARTS   AGE\r\ntestnginx   1\/1     Running   0          157m\r\n[root@controller ~]# kubectl expose pod testnginx --port=80 -n restricted\r\nservice\/testnginx exposed\r\n\r\n[root@controller ~]# kubectl get svc -n restricted\r\nNAME        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE\r\ntestnginx   ClusterIP   10.110.72.60   &lt;none&gt;        80\/TCP    28s\r\n\r\n[root@controller ~]# kubectl get svc -n restricted -o wide\r\nNAME        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE   SELECTOR\r\ntestnginx   ClusterIP   10.110.72.60   &lt;none&gt;        80\/TCP    38s   run=testnginx\r\n[root@controller ~]# kubectl exec -it testbox -n access -- curl 10.110.72.60\r\nerror: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec \"73803fa926c75637709e237ef1c55e9ca8fdc4a6a50557d585ca55e8dc567a2b\": OCI runtime exec f                               ailed: exec failed: unable to start container process: exec: \"curl\": executable file not found in $PATH: unknown\r\n\r\n[root@controller ~]# kubectl exec -it testbox -n access -- wget 10.110.72.60\r\nConnecting to 10.110.72.60 (10.110.72.60:80)\r\nsaving to 'index.html'\r\nindex.html           100% |********************************|   615  0:00:00 ETA\r\n'index.html' saved\r\n\r\n[root@controller ~]# kubectl exec -it testbox -- wget 10.110.72.60\r\nConnecting to 10.110.72.60 
(10.110.72.60:80)\r\n^Ccommand terminated with exit code 130\r\n\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Setting up Quota<\/span><\/p>\n<ul>\n<li>Create a Namespace with the name <em>limited<\/em> and configure it such that only<br \/>\n5 Pods can be started and the total amount of available memory for<br \/>\napplications running in that Namespace is limited to 2 GiB.<\/li>\n<li>Run a webserver Deployment with the name lab128deploy and using 3 Pods in this Namespace.<\/li>\n<li>Each of the Pods should request 128MiB memory and be limited to 256MiB.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ns limited\r\nnamespace\/limited created\r\n\r\n[root@controller ~]# kubectl create quota my-quota --hard=memory=2G,pods=5 -n limited\r\nresourcequota\/my-quota created\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     my-quota\r\n  Resource  Used  Hard\r\n  --------  ---   ---\r\n  memory    0     2G\r\n  pods      0     5\r\n\r\nNo LimitRange resource.\r\n\r\n[root@controller ~]# kubectl create deploy lab128deploy --image=nginx --replicas=3 -n limited\r\ndeployment.apps\/lab128deploy created\r\n\r\n[root@controller ~]# kubectl set resources -h\r\n...\r\n  # Set the resource request and limits for all containers in nginx\r\n  kubectl set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi\r\n...\r\n\r\nroot@controller ~]# kubectl set resources deployment lab128deploy --limits=memory=256Mi --requests=memory=128Mi -n limited\r\ndeployment.apps\/lab128deploy resource requirements updated\r\n\r\n[root@controller ~]# kubectl get all -n limited\r\nNAME                                READY   STATUS              RESTARTS   AGE\r\npod\/lab128deploy-6f9c55779d-shcdl   0\/1     ContainerCreating   0          15s\r\n\r\nNAME                           READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/lab128deploy   0\/3     1            0           14m\r\n\r\nNAME                                      DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/lab128deploy-595cd4d5cb   3         0         0       14m\r\nreplicaset.apps\/lab128deploy-6f9c55779d   1         1         0       15s\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     my-quota\r\n  Resource  Used   Hard\r\n  --------  ---    ---\r\n  memory    128Mi  2G\r\n  pods      1      5\r\n\r\nNo LimitRange resource.\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Creating a Static Pod<\/span><\/p>\n<p>Configure a Pod with the name lab129pod that will be started by the kubelet<br \/>\non node worker2 as a static Pod.<\/p>\n<p>On controller:<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl run lab129pod --image=nginx --dry-run=client -o yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab129pod\r\n  name: lab129pod\r\nspec:\r\n  containers:\r\n  - image: nginx\r\n    name: lab129pod\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n<\/pre>\n<p>On worker2:<\/p>\n<pre class=\"lang:default decode:true \">[root@worker2 ~]# cd 
\/etc\/kubernetes\/manifests\r\n[root@worker2 manifests]# vi lab129pod.yaml\r\n[root@worker2 manifests]# cat lab129pod.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab129pod\r\n  name: lab129pod\r\nspec:\r\n  containers:\r\n  - image: nginx\r\n    name: lab129pod\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n<\/pre>\n<p>On Controller:<\/p>\n<pre class=\"lang:default mark:11 decode:true \">[root@controller ~]# kubectl get pods -o wide\r\nNAME                            READY   STATUS              RESTARTS             AGE     IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab123                          2\/2     Running             0                    44h     192.168.0.155   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab124deploy-7c7c8457f9-lclk4   1\/1     Running             0                    44h     192.168.0.156   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-4drk6    1\/1     Running             0                    22h     192.168.0.157   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-lhmfs    1\/1     Running             0                    22h     192.168.0.159   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-zw5fq    1\/1     Running             0                    22h     192.168.0.158   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab128deploy-595cd4d5cb-595b8   0\/1     Terminating         0                    52m     &lt;none&gt;          worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab128deploy-595cd4d5cb-bhnqm   0\/1     Terminating         0                    52m     &lt;none&gt;          worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab128deploy-595cd4d5cb-t86fz   0\/1     Terminating         0                    52m     &lt;none&gt;          worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab129pod-worker2.example.com   0\/1     ContainerCreating   0                    6m33s   &lt;none&gt;          worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\ntestbox                         1\/1     Running             19 (&lt;invalid&gt; ago)   20h     192.168.0.162   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Troubleshooting Node Services<\/span><\/p>\n<p>Assume that node worker2 is not currently available. 
Ensure that the appropriate service is started on that node so that the node shows up as running again.<\/p>
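\n<p>No solution transcript was recorded for this one. The node-level service that has to run for a node to report as Ready is the kubelet; a minimal sketch of the usual fix (assuming worker2 is reachable over SSH):<\/p>\n<pre class=\"lang:default decode:true \">[root@worker2 ~]# systemctl status kubelet\r\n[root@worker2 ~]# systemctl enable --now kubelet\r\n[root@controller ~]# kubectl get nodes\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Configuring Cluster Access<\/span><\/p>\n<p>Create a ServiceAccount that has permissions to create Pods, Deployments,<br \/>\nDaemonSets and StatefulSets in the Namespace \u201caccess\u201d.<\/p>\n<p>Go to the kubernetes documentation page <em>-&gt; search: role -&gt; Using RBAC Authorization -&gt; Role examples<\/em><\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ns access\r\nError from server (AlreadyExists): namespaces \"access\" already exists\r\n\r\n[root@controller ~]# kubectl create role -h\r\nCreate a role with single rule.\r\n\r\nExamples:\r\n  # Create a role named \"pod-reader\" that allows user to perform \"get\", \"watch\" and \"list\" on pods\r\n  kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods\r\n...\r\n\r\n[root@controller ~]# kubectl create role app-creator --verb=get --verb=list --verb=watch --verb=create --verb=update --verb=patch --verb=delete --resource=pods,deployment,daemonset,statefulset -n access\r\nrole.rbac.authorization.k8s.io\/app-creator created\r\n\r\n[root@controller ~]# kubectl describe role app-creator -n access\r\nName:         app-creator\r\nLabels:       &lt;none&gt;\r\nAnnotations:  &lt;none&gt;\r\nPolicyRule:\r\n  Resources          Non-Resource URLs  Resource Names  Verbs\r\n  ---------          -----------------  --------------  -----\r\n  pods               []                 []              [get list watch create update patch delete]\r\n  daemonsets.apps    []                 []              [get list watch create update patch delete]\r\n  deployments.apps   []                 []              [get list watch create update patch delete]\r\n  statefulsets.apps  []                 []              [get list watch create update patch delete]\r\n\r\n[root@controller ~]# kubectl get clusterroles | more\r\nNAME                                                                   CREATED AT\r\nadmin                                                                  2024-02-15T16:08:33Z\r\n...                                                      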
2024-02-15T16:08:33Z\r\nedit                                                                   2024-02-15T16:08:33Z\r\nkubeadm:get-nodes                                                      2024-02-15T16:08:34Z\r\nsystem:aggregate-to-admin                                              2024-02-15T16:08:33Z\r\nsystem:aggregate-to-edit                                               2024-02-15T16:08:33Z\r\nsystem:aggregate-to-view                                               2024-02-15T16:08:33Z\r\nsystem:auth-delegator                                                  2024-02-15T16:08:33Z\r\nsystem:basic-user                                                      2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:certificatesigningrequests:nodeclient       2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:certificatesigningrequests:selfnodeclient   2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:kube-apiserver-client-approver              2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:kube-apiserver-client-kubelet-approver      2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:kubelet-serving-approver                    2024-02-15T16:08:33Z\r\nsystem:certificates.k8s.io:legacy-unknown-approver                     2024-02-15T16:08:33Z\r\nsystem:controller:attachdetach-controller                              2024-02-15T16:08:33Z\r\nsystem:controller:certificate-controller                               2024-02-15T16:08:33Z\r\nsystem:controller:clusterrole-aggregation-controller                   2024-02-15T16:08:33Z\r\nsystem:controller:cronjob-controller                                   2024-02-15T16:08:33Z\r\nsystem:controller:daemon-set-controller                                2024-02-15T16:08:33Z\r\nsystem:controller:deployment-controller                                2024-02-15T16:08:33Z\r\nsystem:controller:disruption-controller                                2024-02-15T16:08:33Z\r\nsystem:controller:endpoint-controller                                  2024-02-15T16:08:33Z\r\nsystem:controller:endpointslice-controller                             2024-02-15T16:08:33Z\r\nsystem:controller:endpointslicemirroring-controller                    2024-02-15T16:08:33Z\r\nsystem:controller:ephemeral-volume-controller                          2024-02-15T16:08:33Z\r\nsystem:controller:expand-controller                                    2024-02-15T16:08:33Z\r\nsystem:controller:generic-garbage-collector                            2024-02-15T16:08:33Z\r\nsystem:controller:horizontal-pod-autoscaler                            2024-02-15T16:08:33Z\r\nsystem:controller:job-controller                                       2024-02-15T16:08:33Z\r\nsystem:controller:namespace-controller                                 2024-02-15T16:08:33Z\r\nsystem:controller:node-controller                                      2024-02-15T16:08:33Z\r\nsystem:controller:persistent-volume-binder                             2024-02-15T16:08:33Z\r\nsystem:controller:pod-garbage-collector                                2024-02-15T16:08:33Z\r\n\r\n\r\n[root@controller ~]# kubectl create sa app-creator -n access\r\nserviceaccount\/app-creator created\r\n\r\n[root@controller ~]# kubectl create rolebinding  app-creator --role=app-creator --serviceaccount=access:app-creator -n access\r\nrolebinding.rbac.authorization.k8s.io\/app-creator created\r\n\r\n[root@controller ~]# kubectl get role,rolebinding,serviceaccount -n access\r\nNAME                                         CREATED AT\r\nrole.rbac.authorization.k8s.io\/app-creator 
  2024-02-19T12:31:59Z\r\n\r\nNAME                                                ROLE               AGE\r\nrolebinding.rbac.authorization.k8s.io\/app-creator   Role\/app-creator   58s\r\n\r\nNAME                         SECRETS   AGE\r\nserviceaccount\/app-creator   0         3m7s\r\nserviceaccount\/default       0         22h\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Configuring Taints and Tolerations<\/span><\/p>\n<ul>\n<li>Configure node worker2 such that it will only allow Pods to run that have<br \/>\nbeen configured with the setting type:db<\/li>\n<li>After verifying this works, remove the node restriction to return to normal operation.<\/li>\n<\/ul>\n<p>Go to the kubernetes documentation page -&gt; <em>search: taint -&gt;\u00a0 Taints and Tolerations<\/em><\/p>\n<pre class=\"lang:default mark:26-30 decode:true\">[root@controller ~]# kubectl taint nodes worker2.example.com type=db:NoSchedule\r\nnode\/worker2.example.com tainted\r\n\r\n[root@controller ~]# kubectl create deploy tolerate-nginx --image=nginx --replicas=3 --dry-run=client -o yaml &gt; lab1212.yaml\r\n[root@controller ~]# vim lab1212.yaml\r\n[root@controller ~]# cat lab1212.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    app: tolerate-nginx\r\n  name: tolerate-nginx\r\nspec:\r\n  replicas: 3\r\n  selector:\r\n    matchLabels:\r\n      app: tolerate-nginx\r\n  strategy: {}\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: tolerate-nginx\r\n    spec:\r\n      tolerations:\r\n       - key: \"type\"\r\n         operator: \"Equal\"\r\n         value: \"db\"\r\n         effect: \"NoSchedule\"\r\n      containers:\r\n      - image: nginx\r\n        name: nginx\r\n        resources: {}\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl apply -f lab1212.yaml\r\ndeployment.apps\/tolerate-nginx created\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME                              READY   STATUS    RESTARTS       AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab124deploy-7c7c8457f9-lclk4     1\/1     Running   1 (107m ago)   46h   192.168.0.167   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-4drk6      1\/1     Running   1 (107m ago)   24h   192.168.0.166   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-lhmfs      1\/1     Running   1 (97m ago)    24h   192.168.0.177   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-zw5fq      1\/1     Running   1 (97m ago)    24h   192.168.0.175   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-bl5vt   1\/1     Running   0              8s    192.168.0.183   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-dm27q   1\/1     Running   0              8s    192.168.0.182   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-h2vrf   1\/1     Running   0              8s    192.168.0.184   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME                              READY   STATUS    RESTARTS       AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab124deploy-7c7c8457f9-lclk4     1\/1     Running   1 (108m ago)   46h   192.168.0.167   worker2.example.com   &lt;none&gt;           
&lt;none&gt;\r\nlab126deploy-fff46cd4b-4drk6      1\/1     Running   1 (108m ago)   24h   192.168.0.166   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-lhmfs      1\/1     Running   1 (98m ago)    24h   192.168.0.177   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-zw5fq      1\/1     Running   1 (98m ago)    24h   192.168.0.175   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-bl5vt   1\/1     Running   0              69s   192.168.0.183   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-dm27q   1\/1     Running   0              69s   192.168.0.182   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-h2vrf   1\/1     Running   0              69s   192.168.0.184   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl create deploy test-deploy --image=nginx --replicas=4\r\ndeployment.apps\/test-deploy created\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME                              READY   STATUS    RESTARTS       AGE     IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab124deploy-7c7c8457f9-lclk4     1\/1     Running   1 (110m ago)   46h     192.168.0.167   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-4drk6      1\/1     Running   1 (110m ago)   24h     192.168.0.166   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-lhmfs      1\/1     Running   1 (99m ago)    24h     192.168.0.177   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab126deploy-fff46cd4b-zw5fq      1\/1     Running   1 (99m ago)    24h     192.168.0.175   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntest-deploy-859f95ffcc-8st6q      1\/1     Running   0              5s      192.168.0.186   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntest-deploy-859f95ffcc-bcxfl      1\/1     Running   0              5s      192.168.0.188   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntest-deploy-859f95ffcc-g9t6k      1\/1     Running   0              5s      192.168.0.185   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntest-deploy-859f95ffcc-xw2gv      1\/1     Running   0              5s      192.168.0.187   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-bl5vt   1\/1     Running   0              2m31s   192.168.0.183   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-dm27q   1\/1     Running   0              2m31s   192.168.0.182   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\ntolerate-nginx-74bb955695-h2vrf   1\/1     Running   0              2m31s   192.168.0.184   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl delete deploy test-deploy\r\ndeployment.apps \"test-deploy\" deleted\r\n\r\n[root@controller ~]# kubectl taint nodes worker2.example.com type=db:NoSchedule-\r\nnode\/worker2.example.com untainted\r\n\r\n[root@controller ~]# kubectl delete -f lab1212.yaml\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Configuring a High Availability Cluster<\/span><\/p>\n<ul>\n<li>Configure a High Availability cluster with three control plane nodes and two<br \/>\nworker nodes.<\/li>\n<li>Ensure that each control plane node can be used as a client as 
well.<\/li>\n<li>Use the scripts provided in the course Git repository at https:\/\/github.com\/sandervanvugt\/cka to install the CRI, kubetools and load balancer.<\/li>\n<\/ul>
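\n<p>No transcript was recorded for the HA labs. As a rough sketch of the generic kubeadm flow (the course scripts handle the CRI, kubetools and the load balancer; the load balancer address, token, hash and key below are placeholders): initialize the first control plane node against the load-balanced endpoint, join the remaining control plane nodes with --control-plane, and copy the admin kubeconfig to each of them so they can be used as clients:<\/p>\n<pre class=\"lang:default decode:true \">[root@control1 ~]# kubeadm init --control-plane-endpoint \"&lt;LB-IP&gt;:8443\" --upload-certs\r\n[root@control2 ~]# kubeadm join &lt;LB-IP&gt;:8443 --token &lt;token&gt; --discovery-token-ca-cert-hash sha256:&lt;hash&gt; --control-plane --certificate-key &lt;key&gt;\r\n[root@control2 ~]# mkdir -p $HOME\/.kube; cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\r\n[root@worker1 ~]# kubeadm join &lt;LB-IP&gt;:8443 --token &lt;token&gt; --discovery-token-ca-cert-hash sha256:&lt;hash&gt;\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Etcd Backup and Restore<\/span><\/p>\n<p>Note: all tasks from here on should be performed on a non-HA cluster.<\/p>\n<ul>\n<li>Before creating the backup, create a Deployment that runs nginx.<\/li>\n<li>Create a backup of etcd and write it to \/tmp\/etcdbackup.<\/li>\n<li>Delete the Deployment you just created.<\/li>\n<li>Restore the backup that you have created in the first step of this procedure and verify that the Deployment is available again.<\/li>\n<\/ul>\n<p>Again, no transcript was recorded. A minimal sketch, assuming a default kubeadm setup (etcd serving certificates under \/etc\/kubernetes\/pki\/etcd, etcd data in \/var\/lib\/etcd) and etcdctl installed on the control plane node; the Deployment name testweb is made up for the exercise:<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create deploy testweb --image=nginx\r\n[root@controller ~]# ETCDCTL_API=3 etcdctl --endpoints=https:\/\/127.0.0.1:2379 --cacert=\/etc\/kubernetes\/pki\/etcd\/ca.crt --cert=\/etc\/kubernetes\/pki\/etcd\/server.crt --key=\/etc\/kubernetes\/pki\/etcd\/server.key snapshot save \/tmp\/etcdbackup\r\n[root@controller ~]# kubectl delete deploy testweb\r\n[root@controller ~]# ETCDCTL_API=3 etcdctl snapshot restore \/tmp\/etcdbackup --data-dir \/var\/lib\/etcd-backup\r\n[root@controller ~]# vi \/etc\/kubernetes\/manifests\/etcd.yaml\r\n# change the etcd-data hostPath from \/var\/lib\/etcd to \/var\/lib\/etcd-backup; the kubelet\r\n# then recreates the etcd Pod, and once the API server responds again:\r\n[root@controller ~]# kubectl get deploy\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Performing a Control Node Upgrade<\/span><\/p>\n<ul>\n<li>Notice that this task requires you to have a control node running an older<br \/>\nversion of Kubernetes available.<\/li>\n<li>Update the control node to the latest version of Kubernetes.<\/li>\n<li>Ensure that the kubelet and kubectl are updated as well.<\/li>\n<\/ul>\n<p>No transcript here either; a sketch of the standard kubeadm upgrade procedure on a yum-based system (substitute the actual target version for 1.29.x and the real node name for controller.example.com):<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# yum install -y kubeadm-'1.29.*' --disableexcludes=kubernetes\r\n[root@controller ~]# kubeadm upgrade plan\r\n[root@controller ~]# kubeadm upgrade apply v1.29.x\r\n[root@controller ~]# kubectl drain controller.example.com --ignore-daemonsets\r\n[root@controller ~]# yum install -y kubelet-'1.29.*' kubectl-'1.29.*' --disableexcludes=kubernetes\r\n[root@controller ~]# systemctl daemon-reload\r\n[root@controller ~]# systemctl restart kubelet\r\n[root@controller ~]# kubectl uncordon controller.example.com\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Configuring Application Logging<\/span><\/p>\n<ul>\n<li>Create a Pod with a logging agent that runs as a sidecar container.<\/li>\n<li>Configure the main application to use Busybox and run the Linux date command every minute. The result of this command should be written to the directory<code> \/output\/date.log<\/code>.<\/li>\n<li>Set up a sidecar container that runs Nginx and provide access to the date.log file on <code>\/usr\/share\/nginx\/html\/date.log<\/code>.<\/li>\n<\/ul>\n<p>Go to the kubernetes documentation page -&gt; <em>search: logging-&gt; Logging Architecture<br \/>\n<\/em><\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# vi lab135.yaml\r\n[root@controller ~]# cat lab135.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: counter\r\nspec:\r\n  containers:\r\n  - name: count\r\n    image: busybox:1.28\r\n    args:\r\n    - \/bin\/sh\r\n    - -c\r\n    - &gt;\r\n      i=0;\r\n      while true;\r\n      do\r\n        echo \"$i: $(date)\" &gt;&gt; \/var\/log\/1.log;\r\n        echo \"$(date) INFO $i\" &gt;&gt; \/var\/log\/2.log;\r\n        i=$((i+1));\r\n        sleep 1;\r\n      done\r\n    volumeMounts:\r\n    - name: varlog\r\n      mountPath: \/var\/log\r\n  - name: count-log-1\r\n    image: busybox:1.28\r\n    args: [\/bin\/sh, -c, 'tail -n+1 -F \/var\/log\/1.log']\r\n    volumeMounts:\r\n    - name: varlog\r\n      mountPath: \/var\/log\r\n  - name: count-log-2\r\n    image: busybox:1.28\r\n    args: [\/bin\/sh, -c, 'tail -n+1 -F \/var\/log\/2.log']\r\n    volumeMounts:\r\n    - name: varlog\r\n      mountPath: \/var\/log\r\n  volumes:\r\n  - name: varlog\r\n    emptyDir: {}\r\n\r\n[root@controller ~]# vi lab135.yaml\r\n[root@controller ~]# cat lab135.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: counter\r\nspec:\r\n  containers:\r\n  - name: count\r\n    image: busybox\r\n    args:\r\n    - \/bin\/sh\r\n    - -c\r\n    - &gt;\r\n      while sleep 60;\r\n      do\r\n        echo \"$(date)\" &gt;&gt; \/output\/date.log\r\n      done\r\n    volumeMounts:\r\n    - name: varlog\r\n      mountPath: \/output\r\n  - name: count-log-1\r\n    image: nginx\r\n    volumeMounts:\r\n    - name: varlog\r\n      mountPath: \/usr\/share\/nginx\/html\r\n  volumes:\r\n  - name: varlog\r\n    emptyDir: {}\r\n\r\n[root@controller ~]# kubectl apply -f lab135.yaml\r\npod\/counter created\r\n[root@controller ~]# kubectl describe pod 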
counter\r\nName:             counter\r\nNamespace:        default\r\nPriority:         0\r\nService Account:  default\r\nNode:             worker1.example.com\/172.30.9.26\r\nStart Time:       Mon, 19 Feb 2024 11:32:05 -0500\r\nLabels:           &lt;none&gt;\r\nAnnotations:      cni.projectcalico.org\/containerID: 749bc21f17631ce5c99e0012bb32da1713812b25fe6c3257b8f3dcb3a7195c0e\r\n                  cni.projectcalico.org\/podIP: 192.168.0.189\/32\r\n                  cni.projectcalico.org\/podIPs: 192.168.0.189\/32\r\nStatus:           Running\r\nIP:               192.168.0.189\r\nIPs:\r\n  IP:  192.168.0.189\r\nContainers:\r\n  count:\r\n    Container ID:  containerd:\/\/beeb87d6485daca25884b9cfda033aa029fae1f9a16c7ea99c6f315ab29dbf4f\r\n    Image:         busybox\r\n    Image ID:      docker.io\/library\/busybox@sha256:6d9ac9237a84afe1516540f40a0fafdc86859b2141954b4d643af7066d598b74\r\n    Port:          &lt;none&gt;\r\n    Host Port:     &lt;none&gt;\r\n    Args:\r\n      \/bin\/sh\r\n      -c\r\n      while sleep 60; do\r\n        echo \"$(date)\" &gt;&gt; \/output\/date.log\r\n      done\r\n\r\n    State:          Running\r\n      Started:      Mon, 19 Feb 2024 11:32:07 -0500\r\n    Ready:          True\r\n    Restart Count:  0\r\n    Environment:    &lt;none&gt;\r\n    Mounts:\r\n      \/output from varlog (rw)\r\n      \/var\/run\/secrets\/kubernetes.io\/serviceaccount from kube-api-access-c6f7x (ro)\r\n  count-log-1:\r\n    Container ID:   containerd:\/\/0068e32f24123c84c4e4607bc9e4f6f709aba7af956092ee45204101d3430780\r\n...\r\n[root@controller ~]# kubectl exec -it counter -c count-log-1 -- cat \/usr\/share\/nginx\/html\/date.log\r\nMon Feb 19 16:33:07 UTC 2024\r\nMon Feb 19 16:34:07 UTC 2024\r\n\r\n[root@controller ~]# kubectl exec -it counter -c count-log-1 -- cat \/usr\/share\/nginx\/html\/date.log\r\nMon Feb 19 16:33:07 UTC 2024\r\nMon Feb 19 16:34:07 UTC 2024\r\nMon Feb 19 16:35:07 UTC 2024\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Managing Persistent Volume Claims<\/span><\/p>\n<ul>\n<li>Create a PersistentVolume that uses 1GB of HostPath storage.<\/li>\n<li>Create a PersistentVolumeClaim that uses the PersistentVolume; the PersistentVolumeClaim should request 100 MiB of storage.<\/li>\n<li>Run a Pod with the name storage, using the Nginx image and mounting this PVC on the directory \/data.<\/li>\n<li>After creating the configuration, change the PersistentVolumeClaim to request a size of 200MiB.<\/li>\n<\/ul>\n<p>(The solution below uses the resize_pvc.yaml example from the course repository, so the Pod is called pv-pod and mounts the PVC on \/vol1 rather than using the names given in the task.)<\/p>\n<pre class=\"lang:default decode:true\">[root@controller cka]# cat resize_pvc.yaml\r\napiVersion: v1\r\nkind: Namespace\r\nmetadata:\r\n  name: myvol\r\n\r\n---\r\napiVersion: storage.k8s.io\/v1\r\nkind: StorageClass\r\nmetadata:\r\n  name: mystorageclass\r\nallowVolumeExpansion: true\r\nprovisioner: kubernetes.io\/no-provisioner\r\n\r\n---\r\napiVersion: v1\r\nkind: PersistentVolume\r\nmetadata:\r\n  name: mypv\r\nspec:\r\n  capacity:\r\n    storage: 1Gi\r\n  accessModes:\r\n    - ReadWriteOnce\r\n  persistentVolumeReclaimPolicy: Recycle\r\n  storageClassName: mystorageclass\r\n  hostPath:\r\n    path: \/tmp\/pv1\r\n\r\n---\r\napiVersion: v1\r\nkind: PersistentVolumeClaim\r\nmetadata:\r\n  name: mypvc\r\n  namespace: myvol\r\nspec:\r\n  accessModes:\r\n    - ReadWriteOnce\r\n  resources:\r\n    requests:\r\n      storage: 100Mi\r\n  storageClassName: mystorageclass\r\n\r\n---\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: pv-pod\r\n  namespace: myvol\r\nspec:\r\n  containers:\r\n    - name: busybox\r\n      image: busybox\r\n      args:\r\n
        - sleep\r\n        - \"3600\"\r\n      volumeMounts:\r\n      - mountPath: \"\/vol1\"\r\n        name: myvolume\r\n  volumes:\r\n    - name: myvolume\r\n      persistentVolumeClaim:\r\n        claimName: mypvc\r\n\r\n[root@controller cka]# kubectl apply -f resize_pvc.yaml\r\nnamespace\/myvol created\r\nstorageclass.storage.k8s.io\/mystorageclass created\r\npersistentvolume\/mypv created\r\npersistentvolumeclaim\/mypvc created\r\npod\/pv-pod created\r\n\r\n[root@controller cka]# kubectl get pv,pvc -n myvol\r\nNAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM         STORAGECLASS     REASON   AGE\r\npersistentvolume\/mypv   1Gi        RWO            Recycle          Bound    myvol\/mypvc   mystorageclass            3m48s\r\n\r\nNAME                          STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS     AGE\r\npersistentvolumeclaim\/mypvc   Bound    mypv     1Gi        RWO            mystorageclass   3m48s\r\n\r\n[root@controller cka]# kubectl edit pvc mypvc -n myvol\r\npersistentvolumeclaim\/mypvc edited\r\n<\/pre>\n<p>And change to:<\/p>\n<pre class=\"lang:default decode:true \">spec:\r\n  accessModes:\r\n  - ReadWriteOnce\r\n  resources:\r\n    requests:\r\n      storage: 200Mi\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Investigating Pod Logs<\/span><\/p>\n<ul>\n<li>Run a Pod with the name failingdb, which starts the mariadb image<br \/>\nwithout any further options (it should fail).<\/li>\n<li>Investigate the Pod logs and write all lines that start with ERROR to \/tmp\/failingdb.log.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl run failingdb --image=mariadb\r\npod\/failingdb created\r\n\r\n[root@controller ~]# kubectl get pods\r\nNAME            READY   STATUS              RESTARTS       AGE\r\nbusybox-ready   0\/1     Running             12 (53m ago)   12h\r\nfailingdb       0\/1     ContainerCreating   0              6s\r\nliveness-exec   1\/1     Running             9 (44m ago)    9h\r\nnginx-probes    1\/1     Running             0              12h\r\n\r\n[root@controller ~]# kubectl get pods\r\nNAME            READY   STATUS             RESTARTS       AGE\r\nbusybox-ready   0\/1     Running            12 (53m ago)   12h\r\nfailingdb       0\/1     CrashLoopBackOff   1 (8s ago)     23s\r\nliveness-exec   1\/1     Running            9 (45m ago)    9h\r\nnginx-probes    1\/1     Running            0              12h\r\n[root@controller ~]# kubectl logs failingdb\r\n2024-03-06 06:46:27+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.3.2+maria~ubu2204 started.\r\n2024-03-06 06:46:27+00:00 [Warn] [Entrypoint]: \/sys\/fs\/cgroup\/blkio:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n11:pids:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n10:rdma:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n9:devices:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab
\r\n8:net_cls,net_prio:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n7:cpuset:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n6:cpu,cpuacct:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n5:memory:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n4:hugetlb:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n3:perf_event:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n2:freezer:\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n1:name=systemd:\/system.slice\/containerd.service\/kubepods-besteffort-pod21510976_7a31_46ec_80dd_1a4ebd5ba06d.slice:cri-containerd:5a50cff6815a8d19a3c66f23c65b60e40b347b5c3f2e8d1231d766ba1e305dab\r\n0::\/\/memory.pressure not writable, functionality unavailable to MariaDB\r\n2024-03-06 06:46:27+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql'\r\n2024-03-06 06:46:27+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.3.2+maria~ubu2204 started.\r\n2024-03-06 06:46:27+00:00 [ERROR] [Entrypoint]: Database is uninitialized and password option is not specified\r\n        You need to specify one of MARIADB_ROOT_PASSWORD, MARIADB_ROOT_PASSWORD_HASH, MARIADB_ALLOW_EMPTY_ROOT_PASSWORD and MARIADB_RANDOM_ROOT_PASSWORD\r\n\r\n[root@controller ~]# kubectl logs failingdb | tail -2\r\n2024-03-06 06:46:53+00:00 [ERROR] [Entrypoint]: Database is uninitialized and password option is not specified\r\n        You need to specify one of MARIADB_ROOT_PASSWORD, MARIADB_ROOT_PASSWORD_HASH, MARIADB_ALLOW_EMPTY_ROOT_PASSWORD and MARIADB_RANDOM_ROOT_PASSWORD\r\n\r\n[root@controller ~]# kubectl logs failingdb | tail -2 &gt; \/tmp\/failingdb.log\r\n\r\n[root@controller ~]# cat \/tmp\/failingdb.log\r\n2024-03-06 06:47:42+00:00 [ERROR] [Entrypoint]: Database is uninitialized and password option is not specified\r\n        You need to specify one of MARIADB_ROOT_PASSWORD, MARIADB_ROOT_PASSWORD_HASH, MARIADB_ALLOW_EMPTY_ROOT_PASSWORD and MARIADB_RANDOM_ROOT_PASSWORD\r\n<\/pre>
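\n<p>The tail -2 approach only works because the ERROR message happens to be the last thing mariadb logs before exiting. Filtering explicitly is more robust (a sketch; note that the lines actually begin with a timestamp, so we match on the [ERROR] tag rather than the start of the line):<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl logs failingdb | grep -F '[ERROR]' &gt; \/tmp\/failingdb.log\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Analyzing Performance<\/span><\/p>\n<ul>\n<li>Find out which Pod currently has the highest CPU load.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true\">kubectl top pods\r\nkubectl apply -f https:\/\/github.com\/kubernetes-sigs\/metrics-server\/releases\/latest\/download\/components.yaml\r\nkubectl get pods -n kube-system\r\nkubectl logs -n kube-system metrics-server-6db4d75b97-d57gq\r\nkubectl edit -n kube-system deploy metrics-server\r\n<\/pre>\n<p>Change<\/p>\n<pre class=\"lang:default decode:true \">    spec:\r\n      containers:\r\n      - args:\r\n        - --cert-dir=\/tmp\r\n        - --secure-port=10250\r\n        - 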
<p><span style=\"color: #3366ff;\">Managing Scheduling<\/span><\/p>\n<ul>\n<li>Run a Pod with the name lab139pod.<\/li>\n<li>Ensure that it only runs on nodes that have the label storage=ssd set.<\/li>\n<\/ul>\n<pre class=\"lang:default mark:34,35,50 decode:true\">[root@controller ~]# kubectl label node worker1.example.com storage=ssd\r\nnode\/worker1.example.com labeled\r\n\r\n[root@controller ~]# kubectl run lab139pod --image=nginx --dry-run=client -o yaml &gt; lab139pod.yaml\r\n\r\n[root@controller ~]# cat lab139pod.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab139pod\r\n  name: lab139pod\r\nspec:\r\n  containers:\r\n  - image: nginx\r\n    name: lab139pod\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# vim lab139pod.yaml\r\n\r\n[root@controller ~]# cat lab139pod.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: lab139pod\r\n  name: lab139pod\r\nspec:\r\n  nodeSelector:\r\n    storage: ssd\r\n  containers:\r\n  - image: nginx\r\n    name: lab139pod\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl apply -f lab139pod.yaml\r\npod\/lab139pod created\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME            READY   STATUS    RESTARTS       AGE   IP               NODE                  NOMINATED NODE   READINESS GATES\r\nbusybox-ready   0\/1     Running   18 (12m ago)   18h   172.16.71.203    worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nlab139pod       1\/1     Running   0              10s   172.16.102.142   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nliveness-exec   1\/1     Running   14 (60m ago)   15h   172.16.71.204    worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nnginx-probes    1\/1     Running   0              17h   172.16.102.139   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\n<\/pre>\n
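<p>The same constraint can also be expressed with node affinity, which supports richer operators than a plain <code>nodeSelector<\/code>; a minimal sketch of the equivalent Pod spec fragment:<\/p>\n<pre class=\"lang:default decode:true \">spec:\r\n  affinity:\r\n    nodeAffinity:\r\n      requiredDuringSchedulingIgnoredDuringExecution:\r\n        nodeSelectorTerms:\r\n        - matchExpressions:\r\n          - key: storage\r\n            operator: In\r\n            values:\r\n            - ssd\r\n<\/pre>\n<p>&nbsp;<\/p>\n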
<p><span style=\"color: #3366ff;\">Configuring Ingress<\/span><\/p>\n<ul>\n<li>Run a Pod with the name lab1310pod, using the Nginx image.<\/li>\n<li>Expose this Pod using a NodePort type Service.<\/li>\n<li>Configure Ingress such that its web content is available on the path lab1310.info\/hi<\/li>\n<li>You will not have to configure an Ingress controller for this assignment; just the API resource is enough.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl run lab1310pod --image=nginx\r\npod\/lab1310pod created\r\n\r\n[root@controller ~]# kubectl expose pod lab1310pod --port=80 --type=NodePort\r\nservice\/lab1310pod exposed\r\n\r\n[root@controller ~]# kubectl get svc\r\nNAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE\r\nkubernetes   ClusterIP   10.96.0.1       &lt;none&gt;        443\/TCP        5d22h\r\nlab1310pod   NodePort    10.102.16.147   &lt;none&gt;        80:30453\/TCP   12s\r\n\r\n[root@controller ~]# kubectl get pods -o wide\r\nNAME         READY   STATUS    RESTARTS   AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\nlab1310pod   1\/1     Running   0          30s   172.16.71.211   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl create ingress -h | more\r\n...\r\nExamples:\r\n  # Create a single ingress called 'simple' that directs requests to foo.com\/bar to svc\r\n  # svc1:8080 with a TLS secret \"my-cert\"\r\n  kubectl create ingress simple --rule=\"foo.com\/bar=svc1:8080,tls=my-cert\"\r\n...\r\n\r\n[root@controller ~]# kubectl create ingress simple --rule=\"lab1310.info\/hi=lab1310pod:80\"\r\ningress.networking.k8s.io\/simple created\r\n\r\n[root@controller ~]# kubectl describe ingress simple\r\nName:             simple\r\nLabels:           &lt;none&gt;\r\nNamespace:        default\r\nAddress:\r\nIngress Class:    &lt;none&gt;\r\nDefault backend:  &lt;default&gt;\r\nRules:\r\n  Host          Path  Backends\r\n  ----          ----  --------\r\n  lab1310.info\r\n                \/hi   lab1310pod:80 (172.16.71.211:80)\r\nAnnotations:    &lt;none&gt;\r\nEvents:         &lt;none&gt;\r\n<\/pre>\n
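<p>With no Ingress controller installed the rule cannot be exercised with curl, but the generated API object can still be inspected. Re-running the same command with <code>--dry-run=client -o yaml<\/code> shows the manifest it produces (a sketch; the output below is approximate):<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ingress simple --rule=\"lab1310.info\/hi=lab1310pod:80\" --dry-run=client -o yaml\r\napiVersion: networking.k8s.io\/v1\r\nkind: Ingress\r\nmetadata:\r\n  creationTimestamp: null\r\n  name: simple\r\nspec:\r\n  rules:\r\n  - host: lab1310.info\r\n    http:\r\n      paths:\r\n      - backend:\r\n          service:\r\n            name: lab1310pod\r\n            port:\r\n              number: 80\r\n        path: \/hi\r\n        pathType: Exact\r\nstatus:\r\n  loadBalancer: {}\r\n<\/pre>\n<p>&nbsp;<\/p>\n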
<p><span style=\"color: #3366ff;\">Preparing for Node Maintenance<\/span><\/p>\n<ul>\n<li>Schedule node worker2 for maintenance in such a way that all running<br \/>\nPods are evicted.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl drain worker2.example.com\r\nnode\/worker2.example.com cordoned\r\nerror: unable to drain node \"worker2.example.com\" due to error:[cannot delete Pods declare no controller (use --force to override): default\/lab1310pod, myvol\/pv-pod, cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system\/calico-node-d2plz, kube-system\/kube-proxy-jz8hj, cannot delete Pods with local storage (use --delete-emptydir-data to override): kube-system\/metrics-server-6db4d75b97-d57gq], continuing command...\r\nThere are pending nodes to be drained:\r\n worker2.example.com\r\ncannot delete Pods declare no controller (use --force to override): default\/lab1310pod, myvol\/pv-pod\r\ncannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system\/calico-node-d2plz, kube-system\/kube-proxy-jz8hj\r\ncannot delete Pods with local storage (use --delete-emptydir-data to override): kube-system\/metrics-server-6db4d75b97-d57gq\r\n\r\n[root@controller ~]# kubectl drain worker2.example.com --ignore-daemonsets --force --delete-emptydir-data\r\nnode\/worker2.example.com already cordoned\r\nWarning: deleting Pods that declare no controller: default\/lab1310pod, myvol\/pv-pod; ignoring DaemonSet-managed Pods: kube-system\/calico-node-d2plz, kube-system\/kube-proxy-jz8hj\r\nevicting pod myvol\/pv-pod\r\nevicting pod default\/lab1310pod\r\nevicting pod kube-system\/metrics-server-6db4d75b97-d57gq\r\npod\/lab1310pod evicted\r\npod\/metrics-server-6db4d75b97-d57gq evicted\r\npod\/pv-pod evicted\r\nnode\/worker2.example.com drained\r\n\r\n[root@controller ~]# kubectl get nodes\r\nNAME                     STATUS                     ROLES           AGE     VERSION\r\ncontroller.example.com   NotReady                   control-plane   5d23h   v1.28.2\r\nworker1.example.com      Ready                      &lt;none&gt;          5d23h   v1.28.2\r\nworker2.example.com      Ready,SchedulingDisabled   &lt;none&gt;          5d23h   v1.28.2\r\n\r\n[root@controller ~]# kubectl uncordon worker2.example.com\r\nnode\/worker2.example.com uncordoned\r\n\r\n[root@controller ~]# kubectl get nodes\r\nNAME                     STATUS     ROLES           AGE     VERSION\r\ncontroller.example.com   NotReady   control-plane   5d23h   v1.28.2\r\nworker1.example.com      Ready      &lt;none&gt;          5d23h   v1.28.2\r\nworker2.example.com      Ready      &lt;none&gt;          5d23h   v1.28.2\r\n<\/pre>\n
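<p>Before the uncordon it is worth confirming that only DaemonSet-managed Pods remain on the drained node; a quick check (a sketch, filtering Pods by the node they are scheduled on):<\/p>\n<pre class=\"lang:default decode:true \">kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=worker2.example.com<\/pre>\n<p>&nbsp;<\/p>\n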
<p><span style=\"color: #3366ff;\">Scaling Applications<\/span><\/p>\n<ul>\n<li>Run a Deployment with the name lab1312deploy using the nginx image.<\/li>\n<li>Scale it such that it runs 6 application instances.<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create deploy lab1312deploy --image=nginx\r\ndeployment.apps\/lab1312deploy created\r\n\r\n[root@controller ~]# kubectl get all --selector app=lab1312deploy\r\nNAME                                 READY   STATUS    RESTARTS   AGE\r\npod\/lab1312deploy-54478d59c6-pl922   1\/1     Running   0          24s\r\n\r\nNAME                            READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/lab1312deploy   1\/1     1            1           24s\r\n\r\nNAME                                       DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/lab1312deploy-54478d59c6   1         1         1       24s\r\n\r\n[root@controller ~]# kubectl scale deployment lab1312deploy --replicas=6\r\ndeployment.apps\/lab1312deploy scaled\r\n\r\n[root@controller ~]# kubectl get all --selector app=lab1312deploy\r\nNAME                                 READY   STATUS    RESTARTS   AGE\r\npod\/lab1312deploy-54478d59c6-64dm6   1\/1     Running   0          7s\r\npod\/lab1312deploy-54478d59c6-c7gxn   1\/1     Running   0          7s\r\npod\/lab1312deploy-54478d59c6-ns5m5   1\/1     Running   0          7s\r\npod\/lab1312deploy-54478d59c6-pl922   1\/1     Running   0          113s\r\npod\/lab1312deploy-54478d59c6-s7gnz   1\/1     Running   0          7s\r\npod\/lab1312deploy-54478d59c6-wjwng   1\/1     Running   0          7s\r\n\r\nNAME                            READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/lab1312deploy   6\/6     6            6           113s\r\n\r\nNAME                                       DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/lab1312deploy-54478d59c6   6         6         6       113s\r\n<\/pre>\n<p>&nbsp;<\/p>\n","protected":false},"excerpt":{"rendered":"","protected":false},"author":1,"featured_media":5941,"comment_status":"closed","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[99],"tags":[],"_links":{"self":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5494"}],"collection":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/comments?post=5494"}],"version-history":[{"count":48,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5494\/revisions"}],"predecessor-version":[{"id":5942,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5494\/revisions\/5942"}],"wp:featuredmedia":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/media\/5941"}],"wp:attachment":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/media?parent=5494"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/categories?post=5494"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/tags?post=5494"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}