{"id":5665,"date":"2024-02-17T17:05:13","date_gmt":"2024-02-17T16:05:13","guid":{"rendered":"http:\/\/miro.borodziuk.eu\/?p=5665"},"modified":"2025-05-05T19:16:34","modified_gmt":"2025-05-05T17:16:34","slug":"kubernetes-excercises-2","status":"publish","type":"post","link":"http:\/\/miro.borodziuk.eu\/index.php\/2024\/02\/17\/kubernetes-excercises-2\/","title":{"rendered":"Kubernetes Excercises 2"},"content":{"rendered":"<p><!--more--><\/p>\n<p><span style=\"color: #3366ff;\">Working with Namespaces<\/span><\/p>\n<p>Create a Namespace ckad-ns1 in your cluster. In this Namespace, run<br \/>\nthe following Pods:<\/p>\n<p>1. A Pod with the name pod-a, running the httpd server image<\/p>\n<p>2. A Pod with the name pod-b, running the nginx server image as well<br \/>\nas the alpine image<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ns ckad-ns1\r\nnamespace\/ckad-ns1 created\r\n\r\n[root@controller ~]# kubectl get ns\r\nNAME              STATUS   AGE\r\nckad-ns1          Active   5s\r\ndefault           Active   6d19h\r\nkube-node-lease   Active   6d19h\r\nkube-public       Active   6d19h\r\nkube-system       Active   6d19h\r\nmyvol             Active   37h\r\n\r\n[root@controller ~]# kubectl run pod-a --image=httpd -n ckad-ns1\r\npod\/pod-a created\r\n\r\n[root@controller ~]# kubectl run pod-b -n ckad-ns1 --image=alpine --dry-run=client -o yaml -- sleep 3600  &gt; task1.yaml\r\n\r\n[root@controller ~]# cat task1.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: pod-b\r\n  name: pod-b\r\n  namespace: ckad-ns1\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: alpine\r\n    name: pod-b\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# vim task1.yaml\r\n\r\n[root@controller ~]# cat task1.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n  
  run: pod-b\r\n  name: pod-b\r\n  namespace: ckad-ns1\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: alpine\r\n    name: alpine\r\n    resources: {}\r\n  - name: nginx\r\n    image: nginx\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl create -f task1.yaml\r\npod\/pod-b created\r\n\r\n[root@controller ~]# kubectl get pods -n ckad-ns1\r\nNAME    READY   STATUS    RESTARTS   AGE\r\npod-a   1\/1     Running   0          10m\r\npod-b   2\/2     Running   0          17s\r\n\r\n[root@controller ~]# kubectl describe pod pod-b -n ckad-ns1\r\nName:             pod-b\r\nNamespace:        ckad-ns1\r\nPriority:         0\r\nService Account:  default\r\nNode:             worker2.example.com\/172.30.9.27\r\nStart Time:       Thu, 07 Mar 2024 07:14:13 -0500\r\nLabels:           run=pod-b\r\nAnnotations:      cni.projectcalico.org\/containerID: f311336c22851ad11ff2fa155dc1c5332f11b14884e25fe537277657662e706d\r\n                  cni.projectcalico.org\/podIP: 172.16.71.219\/32\r\n                  cni.projectcalico.org\/podIPs: 172.16.71.219\/32\r\nStatus:           Running\r\nIP:               172.16.71.219\r\nIPs:\r\n  IP:  172.16.71.219\r\nContainers:\r\n  alpine:\r\n    Container ID:  containerd:\/\/86d25366b5f7e1d7977286263b8d9c44d6cf2f9542d67bcfeeff6a1005304de4\r\n    Image:         alpine\r\n    Image ID:      docker.io\/library\/alpine@sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b\r\n    Port:          &lt;none&gt;\r\n    Host Port:     &lt;none&gt;\r\n    Args:\r\n      sleep\r\n      3600\r\n    State:          Running\r\n      Started:      Thu, 07 Mar 2024 07:14:15 -0500\r\n    Ready:          True\r\n    Restart Count:  0\r\n    Environment:    &lt;none&gt;\r\n    Mounts:\r\n      \/var\/run\/secrets\/kubernetes.io\/serviceaccount from kube-api-access-tkw6v (ro)\r\n  nginx:\r\n    Container ID:   
containerd:\/\/3ea012e6a6173466fe38315d1e7a57f274b10307bfd98dafda5d69837b3ebd0c\r\n    Image:          nginx\r\n    Image ID:       docker.io\/library\/nginx@sha256:25ff478171a2fd27d61a1774d97672bb7c13e888749fc70c711e207be34d370a\r\n    Port:           &lt;none&gt;\r\n    Host Port:      &lt;none&gt;\r\n    State:          Running\r\n      Started:      Thu, 07 Mar 2024 07:14:16 -0500\r\n    Ready:          True\r\n    Restart Count:  0\r\n    Environment:    &lt;none&gt;\r\n    Mounts:\r\n      \/var\/run\/secrets\/kubernetes.io\/serviceaccount from kube-api-access-tkw6v (ro)\r\nConditions:\r\n  Type              Status\r\n  Initialized       True\r\n  Ready             True\r\n  ContainersReady   True\r\n  PodScheduled      True\r\nVolumes:\r\n  kube-api-access-tkw6v:\r\n    Type:                    Projected (a volume that contains injected data from multiple sources)\r\n    TokenExpirationSeconds:  3607\r\n    ConfigMapName:           kube-root-ca.crt\r\n    ConfigMapOptional:       &lt;nil&gt;\r\n    DownwardAPI:             true\r\nQoS Class:                   BestEffort\r\nNode-Selectors:              &lt;none&gt;\r\nTolerations:                 node.kubernetes.io\/not-ready:NoExecute op=Exists for 300s\r\n                             node.kubernetes.io\/unreachable:NoExecute op=Exists for 300s\r\nEvents:\r\n  Type    Reason     Age   From               Message\r\n  ----    ------     ----  ----               -------\r\n  Normal  Scheduled  58s   default-scheduler  Successfully assigned ckad-ns1\/pod-b to worker2.example.com\r\n  Normal  Pulling    57s   kubelet            Pulling image \"alpine\"\r\n  Normal  Pulled     56s   kubelet            Successfully pulled image \"alpine\" in 952ms (952ms including waiting)\r\n  Normal  Created    56s   kubelet            Created container alpine\r\n  Normal  Started    56s   kubelet            Started container alpine\r\n  Normal  Pulling    56s   kubelet            Pulling image \"nginx\"\r\n  Normal  Pulled     
55s   kubelet            Successfully pulled image \"nginx\" in 923ms (923ms including waiting)\r\n  Normal  Created    55s   kubelet            Created container nginx\r\n  Normal  Started    55s   kubelet            Started container nginx\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using Secrets<\/span><\/p>\n<p>Create a Secret that defines the variable password=secret. Create a<br \/>\nDeployment with the name secretapp, which starts the nginx image<br \/>\nand uses this variable.<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl create secret -h | more\r\nCreate a secret with specified type.\r\n...\r\nAvailable Commands:\r\n  docker-registry   Create a secret for use with a Docker registry\r\n  generic           Create a secret from a local file, directory, or literal value\r\n  tls               Create a TLS secret\r\n...\r\n\r\n[root@controller ~]# kubectl create secret generic -h | more\r\n...\r\n  # Create a new secret named my-secret with key1=supersecret and key2=topsecret\r\n  kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret\r\n...\r\n\r\n[root@controller ~]# kubectl create secret generic secretpw --from-literal=password=secret\r\nsecret\/secretpw created\r\n\r\n[root@controller ~]# kubectl describe secrets secretpw\r\nName:         secretpw\r\nNamespace:    default\r\nLabels:       &lt;none&gt;\r\nAnnotations:  &lt;none&gt;\r\n\r\nType:  Opaque\r\n\r\nData\r\n====\r\npassword:  6 bytes\r\n\r\n[root@controller ~]# kubectl create deploy secretap --image=nginx\r\n...\r\n  kubectl set env --from=secret\/mysecret deployment\/myapp\r\n...\r\n\r\n[root@controller ~]# kubectl set env --from=secret\/secretpw deployment\/secretap\r\nWarning: key password transferred to PASSWORD\r\ndeployment.apps\/secretap env updated\r\n\r\n[root@controller ~]# kubectl get pods\r\nNAME                        READY   STATUS    RESTARTS   AGE\r\nsecretap-7b54c85f6d-qq6ll   1\/1     
Running   0          28m\r\n\r\n[root@controller ~]# kubectl exec secretap-7b54c85f6d-qq6ll -- env\r\n...\r\nPASSWORD=secret\r\n...\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Creating Custom Images<\/span><\/p>\n<p>Create a Dockerfile that runs an alpine image with the command &#8220;echo<br \/>\nhello world&#8221; as the default command. Build the image, and export it in<br \/>\nOCI format to a tar file with the name &#8220;greetworld&#8221;.<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# vim Dockerfile\r\n\r\n[root@controller ~]# cat Dockerfile\r\nFROM alpine\r\nCMD [\"echo\",\"hello world\"]\r\n\r\n[root@controller ~]# docker build -t greetworld .\r\n[+] Building 3.1s (5\/5) FINISHED                                                                                                                                                      docker:default\r\n =&gt; [internal] load build definition from Dockerfile                                                                                                                                            0.1s\r\n =&gt; =&gt; transferring dockerfile: 76B                                                                                                                                                             0.0s\r\n =&gt; [internal] load metadata for docker.io\/library\/alpine:latest                                                                                                                                2.1s\r\n =&gt; [internal] load .dockerignore                                                                                                                                                               0.0s\r\n =&gt; =&gt; transferring context: 2B                                                                                                                                                                 0.0s\r\n =&gt; [1\/1] FROM 
docker.io\/library\/alpine:latest@sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b                                                                          0.7s\r\n =&gt; =&gt; resolve docker.io\/library\/alpine:latest@sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b                                                                          0.0s\r\n =&gt; =&gt; sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b 1.64kB \/ 1.64kB                                                                                                  0.0s\r\n =&gt; =&gt; sha256:6457d53fb065d6f250e1504b9bc42d5b6c65941d57532c072d929dd0628977d0 528B \/ 528B                                                                                                      0.0s\r\n =&gt; =&gt; sha256:05455a08881ea9cf0e752bc48e61bbd71a34c029bb13df01e40e3e70e0d007bd 1.47kB \/ 1.47kB                                                                                                  0.0s\r\n =&gt; =&gt; sha256:4abcf20661432fb2d719aaf90656f55c287f8ca915dc1c92ec14ff61e67fbaf8 3.41MB \/ 3.41MB                                                                                                  0.3s\r\n =&gt; =&gt; extracting sha256:4abcf20661432fb2d719aaf90656f55c287f8ca915dc1c92ec14ff61e67fbaf8                                                                                                       0.3s\r\n =&gt; exporting to image                                                                                                                                                                          0.0s\r\n =&gt; =&gt; exporting layers                                                                                                                                                                         0.0s\r\n =&gt; =&gt; writing image sha256:27735e917715dde71cfbf396ed13edeeef9603c18bfbdf750c437956d9c9779a                                                                
                                    0.0s\r\n =&gt; =&gt; naming to docker.io\/library\/greetworld                                                                                                                                                   0.0s\r\n\r\n[root@controller ~]# docker images\r\nREPOSITORY               TAG       IMAGE ID       CREATED       SIZE\r\nmysshd                   latest    88ace079c865   12 days ago   231MB\r\nmyapache                 latest    3b7004c30a95   12 days ago   167MB\r\nnamp                     latest    6d3d4ddc5fcc   12 days ago   387MB\r\nnmap                     latest    6d3d4ddc5fcc   12 days ago   387MB\r\nubuntu                   latest    3db8720ecbf5   3 weeks ago   77.9MB\r\nmariadb                  latest    2f62d6fb2c8b   3 weeks ago   405MB\r\nlocalhost:5000\/mariadb   latest    2f62d6fb2c8b   3 weeks ago   405MB\r\ngreetworld               latest    27735e917715   5 weeks ago   7.38MB\r\nbusybox                  latest    3f57d9401f8d   7 weeks ago   4.26MB\r\nhttpd                    latest    2776f4da9d55   7 weeks ago   167MB\r\n\r\n[root@controller ~]# docker save --help\r\n\r\nUsage:  docker save [OPTIONS] IMAGE [IMAGE...]\r\n\r\nSave one or more images to a tar archive (streamed to STDOUT by default)\r\n\r\nAliases:\r\n  docker image save, docker save\r\n\r\nOptions:\r\n  -o, --output string   Write to a file, instead of STDOUT\r\n[root@controller ~]# docker save -o greetworld.tar greetworld\r\n[root@controller ~]# ls -l greet*\r\n-rw------- 1 root root 7677952 03-07 11:32 greetworld.tar\r\n\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using Sidecars<\/span><\/p>\n<p>Create a Multi-container Pod with the name sidecar-pod, that runs in the<br \/>\nckad-ns3 Namespace<\/p>\n<ul>\n<li>The primary container is running busybox, and writes the output of<br \/>\nthe date command to the<code> \/var\/log\/date.log<\/code> file every 5 seconds<\/li>\n<li>The second container should run as a sidecar 
and provide nginx web-access to this file, using a hostPath shared volume. (mount on <code>\/usr\/share\/nginx\/html<\/code>)<\/li>\n<li>Make sure the image for this container is only pulled if it&#8217;s not available on the local system yet<\/li>\n<\/ul>\n<p>Go to the documentation<\/p>\n<p>1. search -&gt; Communicate Between Containers in the Same Pod<\/p>\n<p>2. search: hostpath -&gt; Configure a Pod to Use a PersistentVolume for Storage<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# vi task156.yaml\r\n[root@controller ~]# cat task156.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: two-containers\r\nspec:\r\n\r\n  restartPolicy: Never\r\n\r\n  volumes:\r\n  - name: shared-data\r\n    emptyDir: {}\r\n\r\n  containers:\r\n\r\n  - name: nginx-container\r\n    image: nginx\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/usr\/share\/nginx\/html\r\n\r\n  - name: debian-container\r\n    image: debian\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/pod-data\r\n    command: [\"\/bin\/sh\"]\r\n    args: [\"-c\", \"echo Hello from the debian container &gt; \/pod-data\/index.html\"]\r\n\r\n[root@controller ~]# vim task156.yaml\r\n[root@controller ~]# cat task156.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: sidecar-pod\r\n  namespace: ckad-ns3\r\nspec:\r\n\r\n  restartPolicy: Never\r\n\r\n  volumes:\r\n  - name: shared-data\r\n    hostPath:\r\n      path: \"\/mydata\"\r\n\r\n  containers:\r\n\r\n  - name: nginx-container\r\n    image: nginx\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/usr\/share\/nginx\/html\r\n\r\n  - name: busybox-container\r\n    image: busybox\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/var\/log\r\n    command: [\"\/bin\/sh\"]\r\n    args: [\"-c\", \"echo Hello from the debian container &gt; \/pod-data\/index.html\"]\r\n\r\n[root@controller ~]# kubectl run busybox --image=busybox --dry-run=client -o yaml -- sh -c 
\"while sleep 5; do date &gt;&gt; \/var\/log\/date.log; done\"\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: busybox\r\n  name: busybox\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sh\r\n    - -c\r\n    - while sleep 5; do date &gt;&gt; \/var\/log\/date.log; done\r\n    image: busybox\r\n    name: busybox\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# vim task156.yaml\r\n[root@controller ~]# cat task156.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: sidecar-pod\r\n  namespace: ckad-ns3\r\nspec:\r\n\r\n  restartPolicy: Never\r\n\r\n  volumes:\r\n  - name: shared-data\r\n    hostPath:\r\n      path: \"\/mydata\"\r\n\r\n  containers:\r\n\r\n  - name: nginx-container\r\n    image: nginx\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/usr\/share\/nginx\/html\r\n\r\n  - name: busybox-container\r\n    image: busybox\r\n    volumeMounts:\r\n    - name: shared-data\r\n      mountPath: \/var\/log\r\n    args:\r\n    - sh\r\n    - -c\r\n    - while sleep 5; do date &gt;&gt; \/var\/log\/date.log; done\r\n\r\n[root@controller ~]# kubectl create ns ckad-ns3\r\nnamespace\/ckad-ns3 created\r\n\r\n[root@controller ~]# kubectl create -f task156.yaml\r\npod\/sidecar-pod created\r\n\r\n[root@controller ~]# kubectl exec -it sidecar-pod -n ckad-ns3 sh\r\nkubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. 
Use kubectl exec [POD] -- [COMMAND] instead.\r\nDefaulted container \"nginx-container\" out of: nginx-container, busybox-container\r\n\r\n# cat \/usr\/share\/nginx\/html\/date.log\r\nFri Mar  8 09:04:09 UTC 2024\r\nFri Mar  8 09:04:14 UTC 2024\r\nFri Mar  8 09:04:19 UTC 2024\r\nFri Mar  8 09:04:24 UTC 2024\r\nFri Mar  8 09:04:29 UTC 2024\r\nFri Mar  8 09:04:34 UTC 2024\r\nFri Mar  8 09:04:39 UTC 2024\r\nFri Mar  8 09:04:44 UTC 2024\r\nFri Mar  8 09:04:49 UTC 2024\r\nFri Mar  8 09:04:54 UTC 2024\r\nFri Mar  8 09:04:59 UTC 2024\r\nFri Mar  8 09:05:04 UTC 2024\r\nFri Mar  8 09:05:09 UTC 2024\r\nFri Mar  8 09:05:14 UTC 2024\r\nFri Mar  8 09:05:19 UTC 2024\r\nFri Mar  8 09:05:24 UTC 2024\r\nFri Mar  8 09:05:29 UTC 2024\r\nFri Mar  8 09:05:34 UTC 2024\r\nFri Mar  8 09:05:39 UTC 2024\r\nFri Mar  8 09:05:44 UTC 2024\r\nFri Mar  8 09:05:49 UTC 2024\r\nFri Mar  8 09:05:54 UTC 2024\r\nFri Mar  8 09:05:59 UTC 2024\r\nFri Mar  8 09:06:04 UTC 2024\r\nFri Mar  8 09:06:09 UTC 2024\r\nFri Mar  8 09:06:14 UTC 2024\r\nFri Mar  8 09:06:19 UTC 2024\r\nFri Mar  8 09:06:24 UTC 2024\r\nFri Mar  8 09:06:29 UTC 2024\r\nFri Mar  8 09:06:34 UTC 2024\r\nFri Mar  8 09:06:39 UTC 2024\r\nFri Mar  8 09:06:44 UTC 2024\r\nFri Mar  8 09:06:49 UTC 2024\r\nFri Mar  8 09:06:54 UTC 2024\r\nFri Mar  8 09:06:59 UTC 2024\r\nFri Mar  8 09:07:04 UTC 2024\r\nFri Mar  8 09:07:09 UTC 2024\r\nFri Mar  8 09:07:14 UTC 2024\r\n# exit\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Fixing a Deployment<\/span><\/p>\n<p>Start the Deployment from the redis.yaml file in the course Git repository.<br \/>\nFix any problems that may occur while starting it.<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ckad]# cat redis.yaml\r\n---\r\napiVersion: apps\/v1beta1\r\nkind: Deployment\r\nmetadata:\r\n  name: redis\r\n  labels:\r\n    app: redis\r\nspec:\r\n  selector:\r\n    matchLabels:\r\n      app: redis\r\n  replicas:\r\n  template:\r\n    metadata:\r\n      labels:\r\n        app: redis\r\n    
spec:\r\n      containers:\r\n      - name: redis\r\n        image: redis:alpine\r\n        ports:\r\n        - containerPort: 6379\r\n          name: redis\r\n\r\n[root@controller ckad]# kubectl create -f redis.yaml\r\nerror: resource mapping not found for name: \"redis\" namespace: \"\" from \"redis.yaml\": no matches for kind \"Deployment\" in version \"apps\/v1beta1\"\r\nensure CRDs are installed first\r\n\r\n[root@controller ckad]# kubectl api-versions\r\nadmissionregistration.k8s.io\/v1\r\napiextensions.k8s.io\/v1\r\napiregistration.k8s.io\/v1\r\napps\/v1\r\nauthentication.k8s.io\/v1\r\nauthorization.k8s.io\/v1\r\nautoscaling\/v1\r\nautoscaling\/v2\r\nbatch\/v1\r\ncertificates.k8s.io\/v1\r\ncoordination.k8s.io\/v1\r\ncrd.projectcalico.org\/v1\r\ndiscovery.k8s.io\/v1\r\nevents.k8s.io\/v1\r\nflowcontrol.apiserver.k8s.io\/v1beta2\r\nflowcontrol.apiserver.k8s.io\/v1beta3\r\nnetworking.k8s.io\/v1\r\nnode.k8s.io\/v1\r\npolicy\/v1\r\nrbac.authorization.k8s.io\/v1\r\nscheduling.k8s.io\/v1\r\nstable.example.com\/v1\r\nstorage.k8s.io\/v1\r\nv1\r\n\r\n[root@controller ckad]# vim redis.yaml\r\n\r\n[root@controller ckad]# kubectl create -f redis.yaml\r\ndeployment.apps\/redis created\r\n\r\n[root@controller ckad]# cat redis.yaml\r\n---\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  name: redis\r\n  labels:\r\n    app: redis\r\nspec:\r\n  selector:\r\n    matchLabels:\r\n      app: redis\r\n  replicas:\r\n  template:\r\n    metadata:\r\n      labels:\r\n        app: redis\r\n    spec:\r\n      containers:\r\n      - name: redis\r\n        image: redis:alpine\r\n        ports:\r\n        - containerPort: 6379\r\n          name: redis\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using Probes<\/span><\/p>\n<p>Create a Pod that runs the nginx webserver<\/p>\n<ul>\n<li>The webserver should be offering its services on port 80 and run in the<br \/>\nckad-ns3 Namespace<\/li>\n<li>This Pod should check the <code>\/healthz<\/code> path on the 
API-server before starting the main container<\/li>\n<\/ul>\n<p>Go to the documentation, search &#8220;healthz api&#8221; -&gt; Kubernetes API health endpoints.<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ckad]# curl -k https:\/\/localhost:6443\/livez?verbose\r\n[+]ping ok\r\n[+]log ok\r\n[+]etcd ok\r\n[+]poststarthook\/start-kube-apiserver-admission-initializer ok\r\n[+]poststarthook\/generic-apiserver-start-informers ok\r\n[+]poststarthook\/priority-and-fairness-config-consumer ok\r\n[+]poststarthook\/priority-and-fairness-filter ok\r\n[+]poststarthook\/storage-object-count-tracker-hook ok\r\n[+]poststarthook\/start-apiextensions-informers ok\r\n[+]poststarthook\/start-apiextensions-controllers ok\r\n[+]poststarthook\/crd-informer-synced ok\r\n[+]poststarthook\/start-service-ip-repair-controllers ok\r\n[+]poststarthook\/rbac\/bootstrap-roles ok\r\n[+]poststarthook\/scheduling\/bootstrap-system-priority-classes ok\r\n[+]poststarthook\/priority-and-fairness-config-producer ok\r\n[+]poststarthook\/start-system-namespaces-controller ok\r\n[+]poststarthook\/bootstrap-controller ok\r\n[+]poststarthook\/start-cluster-authentication-info-controller ok\r\n[+]poststarthook\/start-kube-apiserver-identity-lease-controller ok\r\n[+]poststarthook\/start-deprecated-kube-apiserver-identity-lease-garbage-collector ok\r\n[+]poststarthook\/start-kube-apiserver-identity-lease-garbage-collector ok\r\n[+]poststarthook\/start-legacy-token-tracking-controller ok\r\n[+]poststarthook\/aggregator-reload-proxy-client-cert ok\r\n[+]poststarthook\/start-kube-aggregator-informers ok\r\n[+]poststarthook\/apiservice-registration-controller ok\r\n[+]poststarthook\/apiservice-status-available-controller ok\r\n[+]poststarthook\/kube-apiserver-autoregistration ok\r\n[+]autoregister-completion ok\r\n[+]poststarthook\/apiservice-openapi-controller ok\r\n[+]poststarthook\/apiservice-openapiv3-controller ok\r\n[+]poststarthook\/apiservice-discovery-controller ok\r\nlivez check 
passed\r\n\r\n[root@controller ckad]# curl -k https:\/\/localhost:8443\/healthz?verbose\r\ncurl: (7) Failed to connect to localhost port 8443: Po\u0142\u0105czenie odrzucone\r\n\r\n[root@controller ckad]# curl -k https:\/\/localhost:6443\/healthz?verbose\r\n[+]ping ok\r\n[+]log ok\r\n[+]etcd ok\r\n[+]poststarthook\/start-kube-apiserver-admission-initializer ok\r\n[+]poststarthook\/generic-apiserver-start-informers ok\r\n[+]poststarthook\/priority-and-fairness-config-consumer ok\r\n[+]poststarthook\/priority-and-fairness-filter ok\r\n[+]poststarthook\/storage-object-count-tracker-hook ok\r\n[+]poststarthook\/start-apiextensions-informers ok\r\n[+]poststarthook\/start-apiextensions-controllers ok\r\n[+]poststarthook\/crd-informer-synced ok\r\n[+]poststarthook\/start-service-ip-repair-controllers ok\r\n[+]poststarthook\/rbac\/bootstrap-roles ok\r\n[+]poststarthook\/scheduling\/bootstrap-system-priority-classes ok\r\n[+]poststarthook\/priority-and-fairness-config-producer ok\r\n[+]poststarthook\/start-system-namespaces-controller ok\r\n[+]poststarthook\/bootstrap-controller ok\r\n[+]poststarthook\/start-cluster-authentication-info-controller ok\r\n[+]poststarthook\/start-kube-apiserver-identity-lease-controller ok\r\n[+]poststarthook\/start-deprecated-kube-apiserver-identity-lease-garbage-collector ok\r\n[+]poststarthook\/start-kube-apiserver-identity-lease-garbage-collector ok\r\n[+]poststarthook\/start-legacy-token-tracking-controller ok\r\n[+]poststarthook\/aggregator-reload-proxy-client-cert ok\r\n[+]poststarthook\/start-kube-aggregator-informers ok\r\n[+]poststarthook\/apiservice-registration-controller ok\r\n[+]poststarthook\/apiservice-status-available-controller ok\r\n[+]poststarthook\/kube-apiserver-autoregistration ok\r\n[+]autoregister-completion ok\r\n[+]poststarthook\/apiservice-openapi-controller ok\r\n[+]poststarthook\/apiservice-openapiv3-controller ok\r\n[+]poststarthook\/apiservice-discovery-controller ok\r\nhealthz check 
passed\r\n\r\n[root@controller ckad]# echo $?\r\n0\r\n[root@controller ckad]# ps aux | grep api\r\nroot     3488764  0.0  0.0  12144  1108 pts\/1    S+   08:43   0:00 grep --color=auto api\r\nroot     3687337  5.0 10.3 1630584 385488 ?      Ssl  lut29 577:17 kube-apiserver --advertise-address=172.30.9.25 --allow-privileged=true --authorization-mode=Node,RBAC --client-ca-file=\/etc\/kubernetes\/pki\/ca.crt --enable-admission-plugins=NodeRestriction --enable-bootstrap-token-auth=true --etcd-cafile=\/etc\/kubernetes\/pki\/etcd\/ca.crt --etcd-certfile=\/etc\/kubernetes\/pki\/apiserver-etcd-client.crt --etcd-keyfile=\/etc\/kubernetes\/pki\/apiserver-etcd-client.key --etcd-servers=https:\/\/127.0.0.1:2379 --kubelet-client-certificate=\/etc\/kubernetes\/pki\/apiserver-kubelet-client.crt --kubelet-client-key=\/etc\/kubernetes\/pki\/apiserver-kubelet-client.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --proxy-client-cert-file=\/etc\/kubernetes\/pki\/front-proxy-client.crt --proxy-client-key-file=\/etc\/kubernetes\/pki\/front-proxy-client.key --requestheader-allowed-names=front-proxy-client --requestheader-client-ca-file=\/etc\/kubernetes\/pki\/front-proxy-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https:\/\/kubernetes.default.svc.cluster.local --service-account-key-file=\/etc\/kubernetes\/pki\/sa.pub --service-account-signing-key-file=\/etc\/kubernetes\/pki\/sa.key --service-cluster-ip-range=10.96.0.0\/12 --tls-cert-file=\/etc\/kubernetes\/pki\/apiserver.crt --tls-private-key-file=\/etc\/kubernetes\/pki\/apiserver.key\r\n\r\n[root@controller ckad]# kubectl get pods -n kube-system -o wide\r\nNAME                                             READY   STATUS             RESTARTS           AGE     IP               NODE                     NOMINATED NODE   READINESS 
GATES\r\ncalico-kube-controllers-658d97c59c-7vxhd         0\/1     CrashLoopBackOff   2752 (53s ago)     7d21h   172.16.102.151   worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\ncalico-node-d2plz                                0\/1     CrashLoopBackOff   2237 (2m16s ago)   7d21h   172.30.9.27      worker2.example.com      &lt;none&gt;           &lt;none&gt;\r\ncalico-node-vwl67                                0\/1     CrashLoopBackOff   2239 (2m58s ago)   7d21h   172.30.9.26      worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\ncalico-node-zgsx7                                1\/1     Running            0                  7d21h   172.30.9.25      controller.example.com   &lt;none&gt;           &lt;none&gt;\r\ncoredns-5dd5756b68-9hwls                         0\/1     CrashLoopBackOff   2200 (2m35s ago)   7d21h   172.16.102.152   worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\ncoredns-5dd5756b68-wwq8f                         0\/1     CrashLoopBackOff   2200 (16s ago)     7d21h   172.16.102.154   worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\netcd-controller.example.com                      1\/1     Running            6                  7d21h   172.30.9.25      controller.example.com   &lt;none&gt;           &lt;none&gt;\r\nkube-apiserver-controller.example.com            1\/1     Running            10                 7d21h   172.30.9.25      controller.example.com   &lt;none&gt;           &lt;none&gt;\r\nkube-controller-manager-controller.example.com   1\/1     Running            0                  7d21h   172.30.9.25      controller.example.com   &lt;none&gt;           &lt;none&gt;\r\nkube-proxy-26j88                                 1\/1     Running            3                  7d21h   172.30.9.26      worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\nkube-proxy-jprp5                                 1\/1     Running            0                  7d21h   172.30.9.25      
controller.example.com   &lt;none&gt;           &lt;none&gt;\r\nkube-proxy-jz8hj                                 1\/1     Running            3                  7d21h   172.30.9.27      worker2.example.com      &lt;none&gt;           &lt;none&gt;\r\nkube-scheduler-controller.example.com            1\/1     Running            202                7d21h   172.30.9.25      controller.example.com   &lt;none&gt;           &lt;none&gt;\r\nmetrics-server-6db4d75b97-sd7jb                  0\/1     CrashLoopBackOff   909 (4s ago)       46h     172.16.71.220    worker2.example.com      &lt;none&gt;           &lt;none&gt;\r\nmetrics-server-7697b55fbd-srpjn                  0\/1     CrashLoopBackOff   1074 (4m4s ago)    2d6h    172.16.102.153   worker1.example.com      &lt;none&gt;           &lt;none&gt;\r\n<\/pre>\n<p>Go to the documentation, search &#8220;probes&#8221; -&gt; Configure Liveness, Readiness and Startup Probes<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ckad]# vi task158.yaml\r\n[root@controller ckad]# cat task158.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  labels:\r\n    test: liveness\r\n  name: liveness-exec\r\nspec:\r\n  containers:\r\n  - name: liveness\r\n    image: registry.k8s.io\/busybox\r\n    args:\r\n    - \/bin\/sh\r\n    - -c\r\n    - touch \/tmp\/healthy; sleep 30; rm -f \/tmp\/healthy; sleep 600\r\n    livenessProbe:\r\n      exec:\r\n        command:\r\n        - cat\r\n        - \/tmp\/healthy\r\n      initialDelaySeconds: 5\r\n      periodSeconds: 5\r\n\r\n\r\n[root@controller ckad]# kubectl explain pod.spec.containers.ports\r\nKIND:       Pod\r\nVERSION:    v1\r\n\r\nFIELD: ports &lt;[]ContainerPort&gt;\r\n\r\nDESCRIPTION:\r\n    List of ports to expose from the container. Not specifying a port here DOES\r\n    NOT prevent that port from being exposed. Any port which is listening on the\r\n    default \"0.0.0.0\" address inside a container will be accessible from the\r\n    network. 
Modifying this array with strategic merge patch may corrupt the\r\n    data. For more information See\r\n    https:\/\/github.com\/kubernetes\/kubernetes\/issues\/108255. Cannot be updated.\r\n    ContainerPort represents a network port in a single container.\r\n\r\nFIELDS:\r\n  containerPort &lt;integer&gt; -required-\r\n    Number of port to expose on the pod's IP address. This must be a valid port\r\n    number, 0 &lt; x &lt; 65536.\r\n\r\n  hostIP        &lt;string&gt;\r\n    What host IP to bind the external port to.\r\n\r\n  hostPort      &lt;integer&gt;\r\n    Number of port to expose on the host. If specified, this must be a valid\r\n    port number, 0 &lt; x &lt; 65536. If HostNetwork is specified, this must match\r\n    ContainerPort. Most containers do not need this.\r\n\r\n  name  &lt;string&gt;\r\n    If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\r\n    named port in a pod must have a unique name. Name for the port that can be\r\n    referred to by services.\r\n\r\n  protocol      &lt;string&gt;\r\n    Protocol for port. Must be UDP, TCP, or SCTP. 
Defaults to \"TCP\".\r\n\r\n    Possible enum values:\r\n     - `\"SCTP\"` is the SCTP protocol.\r\n     - `\"TCP\"` is the TCP protocol.\r\n     - `\"UDP\"` is the UDP protocol.\r\n\r\n\r\n[root@controller ckad]# vi task158.yaml\r\n[root@controller ckad]# cat task158.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  labels:\r\n    test: readiness\r\n  name: readiness-exec\r\n  namespace: ckad-ns3\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx\r\n    - containerPort: 80\r\n    readinessProbe:\r\n      exec:\r\n        command:\r\n        - curl\r\n        - -k\r\n        - https:\/\/localhost:6443\/healthz\r\n      initialDelaySeconds: 5\r\n      periodSeconds: 5\r\n\r\n[root@controller ckad]# kubectl create -f task158.yaml\r\nerror: error parsing task158.yaml: error converting YAML to JSON: yaml: line 11: did not find expected key\r\n\r\n[root@controller ckad]# vi task158.yaml\r\n[root@controller ckad]# cat task158.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  labels:\r\n    test: readiness\r\n  name: readiness-exec\r\n  namespace: ckad-ns3\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx\r\n    ports:\r\n      - containerPort: 80\r\n    readinessProbe:\r\n      exec:\r\n        command:\r\n        - curl\r\n        - \"-k\"\r\n        - https:\/\/localhost:6443\/healthz\r\n      initialDelaySeconds: 5\r\n      periodSeconds: 5\r\n\r\n[root@controller ckad]# kubectl get pods -n ckad-ns3\r\nNAME             READY   STATUS    RESTARTS   AGE\r\nreadiness-exec   0\/1     Running   0          23m\r\nsidecar-pod      2\/2     Running   0          6h12m\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Creating a Deployment<\/span><\/p>\n<p>Write a manifest file with the name nginx-exam.yaml that meets the<br \/>\nfollowing requirements:<\/p>\n<ul>\n<li>It starts 5 replicas that run the nginx:1.18 image<\/li>\n<li>Each Pod has the label type=webshop<\/li>\n<li>Create the Deployment such that while updating, it 
can temporarily run 8 application instances at the same time, of which 3 should always be available<\/li>\n<li>The Deployment itself should use the label service=nginx<\/li>\n<li>Update the Deployment to the latest version of the nginx image<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl create deploy nginx-exam --image=nginx:1.18 --dry-run=client -o yaml &gt; nginx-exam.yaml\r\n[root@controller ~]# cat nginx-exam.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    app: nginx-exam\r\n  name: nginx-exam\r\nspec:\r\n  replicas: 1\r\n  selector:\r\n    matchLabels:\r\n      app: nginx-exam\r\n  strategy: {}\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: nginx-exam\r\n    spec:\r\n      containers:\r\n      - image: nginx:1.18\r\n        name: nginx\r\n        resources: {}\r\nstatus: {}\r\n\r\n[root@controller ~]# vim nginx-exam.yaml\r\n[root@controller ~]# cat nginx-exam.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    app: nginx-exam\r\n    service: nginx\r\n  name: nginx-exam\r\nspec:\r\n  replicas: 5\r\n  selector:\r\n    matchLabels:\r\n      app: nginx-exam\r\n  strategy: {}\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: nginx-exam\r\n        type: webshop\r\n    spec:\r\n      containers:\r\n      - image: nginx:1.18\r\n        name: nginx\r\n        resources: {}\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl explain --recursive deployment.spec.strategy\r\n...\r\nFIELDS:\r\n  rollingUpdate &lt;RollingUpdateDeployment&gt;\r\n    maxSurge    &lt;IntOrString&gt;\r\n    maxUnavailable      &lt;IntOrString&gt;\r\n  type  &lt;string&gt;\r\n\r\n[root@controller ~]# vim nginx-exam.yaml\r\n[root@controller ~]# cat nginx-exam.yaml\r\napiVersion: apps\/v1\r\nkind: Deployment\r\nmetadata:\r\n  creationTimestamp: 
null\r\n  labels:\r\n    app: nginx-exam\r\n    service: nginx\r\n  name: nginx-exam\r\nspec:\r\n  replicas: 5\r\n  selector:\r\n    matchLabels:\r\n      app: nginx-exam\r\n  strategy:\r\n    rollingUpdate:\r\n      maxSurge: 3\r\n      maxUnavailable: 2\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: nginx-exam\r\n        type: webshop\r\n    spec:\r\n      containers:\r\n      - image: nginx:1.18\r\n        name: nginx\r\n        resources: {}\r\nstatus: {}\r\n\r\n\r\n[root@controller ~]# kubectl create -f nginx-exam.yaml\r\ndeployment.apps\/nginx-exam created\r\n\r\n[root@controller ~]# kubectl get all --selector app=nginx-exam\r\nNAME                              READY   STATUS    RESTARTS   AGE\r\npod\/nginx-exam-548d9c4767-gbrll   1\/1     Running   0          25s\r\npod\/nginx-exam-548d9c4767-hc7qz   1\/1     Running   0          25s\r\npod\/nginx-exam-548d9c4767-mrlh2   1\/1     Running   0          25s\r\npod\/nginx-exam-548d9c4767-n88jj   1\/1     Running   0          25s\r\npod\/nginx-exam-548d9c4767-v6qrx   1\/1     Running   0          25s\r\n\r\nNAME                         READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/nginx-exam   5\/5     5            5           25s\r\n\r\nNAME                                    DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/nginx-exam-548d9c4767   5         5         5       25s\r\n\r\n\r\n[root@controller ~]# kubectl set image -h | more\r\nUpdate existing container image(s) of resources.\r\n\r\n Possible resources include (case insensitive):\r\n\r\n        pod (po), replicationcontroller (rc), deployment (deploy), daemonset (ds), statefulset (sts), cronjob (cj), replicaset (rs)\r\n\r\nExamples:\r\n  # Set a deployment's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'\r\n  kubectl set image deployment\/nginx busybox=busybox nginx=nginx:1.9.1\r\n...\r\n\r\n[root@controller ~]# kubectl set image 
deployment\/nginx-exam nginx=nginx:latest\r\ndeployment.apps\/nginx-exam image updated\r\n\r\n[root@controller ~]# kubectl get all --selector app=nginx-exam\r\nNAME                              READY   STATUS              RESTARTS   AGE\r\npod\/nginx-exam-67d9b7fc84-f5xzt   1\/1     Running             0          2s\r\npod\/nginx-exam-67d9b7fc84-sdw5v   1\/1     Running             0          2s\r\npod\/nginx-exam-67d9b7fc84-wzxd6   1\/1     Running             0          2s\r\npod\/nginx-exam-67d9b7fc84-xxzvv   0\/1     ContainerCreating   0          2s\r\npod\/nginx-exam-67d9b7fc84-z2257   0\/1     ContainerCreating   0          2s\r\npod\/nginx-exam-79947bcdb8-24xs8   1\/1     Terminating         0          100s\r\npod\/nginx-exam-79947bcdb8-6dprv   1\/1     Running             0          100s\r\npod\/nginx-exam-79947bcdb8-p2kkz   1\/1     Terminating         0          100s\r\n\r\nNAME                         READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/nginx-exam   3\/5     5            3           27m\r\n\r\nNAME                                    DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/nginx-exam-548d9c4767   0         0         0       27m\r\nreplicaset.apps\/nginx-exam-67d9b7fc84   5         5         3       2m47s\r\nreplicaset.apps\/nginx-exam-79947bcdb8   0         1         1       100s\r\n\r\n\r\n[root@controller ~]# kubectl get all --selector app=nginx-exam\r\nNAME                              READY   STATUS    RESTARTS   AGE\r\npod\/nginx-exam-67d9b7fc84-f5xzt   1\/1     Running   0          7s\r\npod\/nginx-exam-67d9b7fc84-sdw5v   1\/1     Running   0          7s\r\npod\/nginx-exam-67d9b7fc84-wzxd6   1\/1     Running   0          7s\r\npod\/nginx-exam-67d9b7fc84-xxzvv   1\/1     Running   0          7s\r\npod\/nginx-exam-67d9b7fc84-z2257   1\/1     Running   0          7s\r\n\r\nNAME                         READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/nginx-exam   5\/5     5            5           27m\r\n\r\nNAME 
                                   DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/nginx-exam-548d9c4767   0         0         0       27m\r\nreplicaset.apps\/nginx-exam-67d9b7fc84   5         5         5       2m52s\r\nreplicaset.apps\/nginx-exam-79947bcdb8   0         0         0       105s\r\n\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Exposing Applications<\/span><\/p>\n<p>In the ckad-ns6 Namespace, create a Deployment that runs the nginx 1.19<br \/>\nimage and give it the name nginx-deployment<\/p>\n<ul>\n<li>Ensure it runs 3 replicas<\/li>\n<li>After verifying that the Deployment runs successfully, expose it such that users that are external to the cluster can reach it by addressing the Node Port 32000 on the Kubernetes Cluster node<\/li>\n<li>Configure Ingress to access the application at mynginx.info<\/li>\n<\/ul>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ns ckad-ns6\r\nnamespace\/ckad-ns6 created\r\n\r\n[root@controller ~]# kubectl create deploy nginx-deployment --image=nginx:1.19 --replicas=3 -n ckad-ns6\r\ndeployment.apps\/nginx-deployment created\r\n\r\n[root@controller ~]# kubectl expose -h\r\n...\r\n  # Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000\r\n  kubectl expose deployment nginx --port=80 --target-port=8000\r\n...\r\n\r\n[root@controller ~]# kubectl expose deployment -n ckad-ns6 nginx-deployment --port=80\r\nservice\/nginx-deployment exposed\r\n\r\n[root@controller ~]# kubectl edit svc nginx-deployment -n ckad-ns6\r\n<\/pre>\n<p>Change:<\/p>\n<pre class=\"lang:default decode:true \">  - port: 80\r\n    protocol: TCP\r\n    targetPort: 80\r\n  selector:\r\n    app: nginx-deployment\r\n  sessionAffinity: None\r\n  type: ClusterIP\r\n\r\n<\/pre>\n<p>To:<\/p>\n<pre class=\"lang:default decode:true \">  - port: 80\r\n    nodePort: 32000\r\n    protocol: TCP\r\n    targetPort: 80\r\n  selector:\r\n    app: nginx-deployment\r\n  
sessionAffinity: None\r\n  type: NodePort\r\n<\/pre>\n<p>And now:<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl get svc -n ckad-ns6\r\nNAME               TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE\r\nnginx-deployment   NodePort   10.110.142.232   &lt;none&gt;        80:32000\/TCP   13m\r\n\r\n[root@controller ~]# kubectl create ingress -h | more\r\n...\r\n  # Create an ingress with the same host and multiple paths\r\n  kubectl create ingress multipath --class=default \\\r\n  --rule=\"foo.com\/=svc:port\" \\\r\n  --rule=\"foo.com\/admin\/=svcadmin:portadmin\"\r\n...\r\n\r\n[root@controller ~]# kubectl create ingress nginxdeploy --class=default --rule=\"mynginx.info\/=nginx-deployment:80\"\r\ningress.networking.k8s.io\/nginxdeploy created\r\n\r\n[root@controller ~]# kubectl delete ingress nginxdeploy\r\ningress.networking.k8s.io \"nginxdeploy\" deleted\r\n\r\n[root@controller ~]# kubectl create ingress nginxdeploy --class=default --rule=\"mynginx.info\/=nginx-deployment:80\" -n ckad-ns6\r\ningress.networking.k8s.io\/nginxdeploy created\r\n\r\n[root@controller ~]# kubectl get ingress -n ckad-ns6\r\nNAME          CLASS     HOSTS          ADDRESS   PORTS   AGE\r\nnginxdeploy   default   mynginx.info             80      26s\r\n[root@controller ~]# vim \/etc\/hosts\r\n[root@controller ~]# cat \/etc\/hosts\r\n127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4\r\n::1         localhost localhost.localdomain localhost6 localhost6.localdomain6\r\n172.30.9.25 controller controller.example.com\r\n172.30.9.26 worker1 worker1.example.com  mynginx.info\r\n172.30.9.27 worker2 worker2.example.com\r\n\r\n[root@controller ~]# kubectl get pod\r\nNo resources found in default namespace.\r\n\r\n[root@controller ~]# kubectl get pod -n ckad-ns6\r\nNAME                                READY   STATUS    RESTARTS   AGE\r\nnginx-deployment-799c656846-68gt5   1\/1     Running   0          
21m\r\nnginx-deployment-799c656846-8l78n   1\/1     Running   0          21m\r\nnginx-deployment-799c656846-dc6l8   1\/1     Running   0          21m\r\n\r\n[root@controller ~]# kubectl get pod -n ckad-ns6 -o wide\r\nNAME                                READY   STATUS    RESTARTS   AGE   IP               NODE                  NOMINATED NODE   READINESS GATES\r\nnginx-deployment-799c656846-68gt5   1\/1     Running   0          21m   172.16.102.171   worker1.example.com   &lt;none&gt;           &lt;none&gt;\r\nnginx-deployment-799c656846-8l78n   1\/1     Running   0          21m   172.16.71.241    worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\nnginx-deployment-799c656846-dc6l8   1\/1     Running   0          21m   172.16.71.242    worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# ping mynginx.info\r\nPING worker1 (172.30.9.26) 56(84) bytes of data.\r\n64 bytes from worker1 (172.30.9.26): icmp_seq=1 ttl=64 time=0.423 ms\r\n64 bytes from worker1 (172.30.9.26): icmp_seq=2 ttl=64 time=0.350 ms\r\n^C\r\n--- worker1 ping statistics ---\r\n2 packets transmitted, 2 received, 0% packet loss, time 1012ms\r\nrtt min\/avg\/max\/mdev = 0.350\/0.386\/0.423\/0.041 ms\r\n\r\n[root@controller ~]# curl mynginx.info\r\ncurl: (7) Failed to connect to mynginx.info port 80: Brak trasy do hosta\r\n[root@controller ~]# curl 172.30.9.26\r\ncurl: (7) Failed to connect to 172.30.9.26 port 80: Brak trasy do hosta\r\n[root@controller ~]# curl 10.110.142.232\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using NetworkPolicies<\/span><\/p>\n<p>Create a YAML file with the name my-nw-policy that runs two Pods and a<br \/>\nNetworkPolicy<\/p>\n<ul>\n<li>The first Pod should run an Nginx server with default settings<\/li>\n<li>The second Pod should run a busybox image with the sleep 3600 command<\/li>\n<li>Use a NetworkPolicy to restrict traffic between Pods in the following way:\n<ul>\n<li>Access to the nginx server is allowed for the 
busybox Pod<\/li>\n<li>The busybox Pod is not restricted in any way<\/li>\n<\/ul>\n<\/li>\n<\/ul>\n<p>Go to the documentation, search: networkpolicy -&gt; Network Policies<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# vim my-nw-policy.yaml\r\n[root@controller ~]# cat my-nw-policy.yaml\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: default\r\nspec:\r\n  podSelector:\r\n    matchLabels:\r\n      role: db\r\n  policyTypes:\r\n  - Ingress\r\n  - Egress\r\n  ingress:\r\n  - from:\r\n    - ipBlock:\r\n        cidr: 172.17.0.0\/16\r\n        except:\r\n        - 172.17.1.0\/24\r\n    - namespaceSelector:\r\n        matchLabels:\r\n          project: myproject\r\n    - podSelector:\r\n        matchLabels:\r\n          role: frontend\r\n    ports:\r\n    - protocol: TCP\r\n      port: 6379\r\n  egress:\r\n  - to:\r\n    - ipBlock:\r\n        cidr: 10.0.0.0\/24\r\n    ports:\r\n    - protocol: TCP\r\n      port: 5978\r\n<\/pre>\n<p>Go to the documentation, search: pods -&gt; Pods and copy simple-pod.yaml<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# vi my-nw-policy.yaml\r\n[root@controller ~]# cat my-nw-policy.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: nginx\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx:1.14.2\r\n    ports:\r\n    - containerPort: 80\r\n---\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: nginx\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx:1.14.2\r\n    ports:\r\n    - containerPort: 80\r\n---\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: default\r\nspec:\r\n  podSelector:\r\n    matchLabels:\r\n      role: db\r\n  policyTypes:\r\n  - Ingress\r\n  - Egress\r\n  ingress:\r\n  - from:\r\n    - ipBlock:\r\n        cidr: 172.17.0.0\/16\r\n        except:\r\n        - 172.17.1.0\/24\r\n    - namespaceSelector:\r\n    
    matchLabels:\r\n          project: myproject\r\n    - podSelector:\r\n        matchLabels:\r\n          role: frontend\r\n    ports:\r\n    - protocol: TCP\r\n      port: 6379\r\n  egress:\r\n  - to:\r\n    - ipBlock:\r\n        cidr: 10.0.0.0\/24\r\n    ports:\r\n    - protocol: TCP\r\n      port: 5978\r\n\r\n[root@controller ~]# vim my-nw-policy.yaml\r\n[root@controller ~]# cat my-nw-policy.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: nwp-nginx\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx\r\n    ports:\r\n    - containerPort: 80\r\n---\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: nwp-busybox\r\nspec:\r\n  containers:\r\n  - name: busybox\r\n    image: busybox\r\n    ports:\r\n    - containerPort: 80\r\n---\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: default\r\nspec:\r\n  podSelector:\r\n    matchLabels:\r\n      role: db\r\n  policyTypes:\r\n  - Ingress\r\n  - Egress\r\n  ingress:\r\n  - from:\r\n    - ipBlock:\r\n        cidr: 172.17.0.0\/16\r\n        except:\r\n        - 172.17.1.0\/24\r\n    - namespaceSelector:\r\n        matchLabels:\r\n          project: myproject\r\n    - podSelector:\r\n        matchLabels:\r\n          role: frontend\r\n    ports:\r\n    - protocol: TCP\r\n      port: 6379\r\n  egress:\r\n  - to:\r\n    - ipBlock:\r\n        cidr: 10.0.0.0\/24\r\n    ports:\r\n    - protocol: TCP\r\n      port: 5978\r\n\r\n\r\n[root@controller ~]# kubectl run busybox --image=busybox --dry-run=client -o yaml -- sleep 3600\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: busybox\r\n  name: busybox\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    name: busybox\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n\r\n[root@controller ~]# cat my-nw-policy.yaml\r\napiVersion: v1\r\nkind: 
Pod\r\nmetadata:\r\n  name: nwp-nginx\r\n  labels:\r\n    app: nginx\r\nspec:\r\n  containers:\r\n  - name: nginx\r\n    image: nginx\r\n---\r\n\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: nwp-busybox\r\n  labels:\r\n    access: allowed\r\nspec:\r\n  containers:\r\n  - name: busybox\r\n    image: busybox\r\n    args:\r\n    - sleep\r\n    - \"3600\"\r\n---\r\n\r\napiVersion: networking.k8s.io\/v1\r\nkind: NetworkPolicy\r\nmetadata:\r\n  name: test-network-policy\r\n  namespace: default\r\nspec:\r\n  podSelector:\r\n    matchLabels:\r\n      app: nginx\r\n  policyTypes:\r\n  - Ingress\r\n  ingress:\r\n  - from:\r\n    - podSelector:\r\n        matchLabels:\r\n          role: frontend\r\n\r\n\r\n[root@controller ~]# kubectl create -f my-nw-policy.yaml\r\npod\/nwp-nginx created\r\npod\/nwp-busybox created\r\nnetworkpolicy.networking.k8s.io\/test-network-policy created\r\n\r\n[root@controller ~]# kubectl expose pod nwp-nginx --port=80\r\nservice\/nwp-nginx exposed\r\n\r\n[root@controller ~]# kubectl get all\r\nNAME              READY   STATUS    RESTARTS   AGE\r\npod\/nwp-busybox   1\/1     Running   0          58s\r\npod\/nwp-nginx     1\/1     Running   0          58s\r\n\r\nNAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE\r\nservice\/kubernetes   ClusterIP   10.96.0.1        &lt;none&gt;        443\/TCP   8d\r\nservice\/nwp-nginx    ClusterIP   10.101.186.122   &lt;none&gt;        80\/TCP    8s\r\n\r\n[root@controller ~]# kubectl exec -it busybox -- wget --spider --timeout=1 nginx\r\nError from server (NotFound): pods \"busybox\" not found\r\n\r\n[root@controller ~]# kubectl exec -it nwp-busybox -- wget --spider --timeout=1 nginx\r\nwget: bad address 'nginx'\r\ncommand terminated with exit code 1\r\n\r\n[root@controller ~]# kubectl exec -it nwp-busybox -- wget --spider --timeout=1 10.101.186.122\r\nConnecting to 10.101.186.122 (10.101.186.122:80)\r\nwget: download timed out\r\ncommand terminated with exit code 
1\r\n\r\n\r\n[root@controller ~]# kubectl label pod nwp-busybox role=frontend\r\npod\/nwp-busybox labeled\r\n\r\n[root@controller ~]# kubectl exec -it nwp-busybox -- wget --spider --timeout=1 10.101.186.122\r\nConnecting to 10.101.186.122 (10.101.186.122:80)\r\nwget: download timed out\r\ncommand terminated with exit code 1\r\n\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using Storage<\/span><\/p>\n<p>All objects in this assignment should be created in the ckad-1312 Namespace.<\/p>\n<ul>\n<li>Create a PersistentVolume with the name 1312-pv. It should provide 2 GiB of storage and read\/write access to multiple clients simultaneously. Use the hostPath storage type<\/li>\n<li>Next, create a PersistentVolumeClaim that requests 1 GiB from any PersistentVolume that allows multiple clients simultaneous read\/write access. The name of the object should be 1312-pvc<\/li>\n<li>Finally, create a Pod with the name 1312-pod that uses this PersistentVolume. It should run an nginx image and mount the volume on the directory \/webdata<\/li>\n<\/ul>\n<p>Go to the documentation, search: persistent volume -&gt; Configure a Pod to Use a PersistentVolume for Storage -&gt; Create a PersistentVolume<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# vi task1512.yaml\r\n[root@controller ~]# cat task1512.yaml\r\napiVersion: v1\r\nkind: PersistentVolume\r\nmetadata:\r\n  name: task-pv-volume\r\n  labels:\r\n    type: local\r\nspec:\r\n  storageClassName: manual\r\n  capacity:\r\n    storage: 10Gi\r\n  accessModes:\r\n    - ReadWriteOnce\r\n  hostPath:\r\n    path: \"\/mnt\/data\"\r\n---\r\n\r\napiVersion: v1\r\nkind: PersistentVolumeClaim\r\nmetadata:\r\n  name: task-pv-claim\r\nspec:\r\n  storageClassName: manual\r\n  accessModes:\r\n    - ReadWriteOnce\r\n  resources:\r\n    requests:\r\n      storage: 3Gi\r\n---\r\n\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: task-pv-pod\r\nspec:\r\n  volumes:\r\n    - name: task-pv-storage\r\n      
persistentVolumeClaim:\r\n        claimName: task-pv-claim\r\n  containers:\r\n    - name: task-pv-container\r\n      image: nginx\r\n      ports:\r\n        - containerPort: 80\r\n          name: \"http-server\"\r\n      volumeMounts:\r\n        - mountPath: \"\/usr\/share\/nginx\/html\"\r\n          name: task-pv-storage\r\n\r\n[root@controller ~]# vi task1512.yaml\r\n[root@controller ~]# cat task1512.yaml\r\napiVersion: v1\r\nkind: PersistentVolume\r\nmetadata:\r\n  name: 1312-pv\r\n  namespace: ckad-1312\r\n  labels:\r\n    type: local\r\nspec:\r\n  storageClassName: manual\r\n  capacity:\r\n    storage: 2Gi\r\n  accessModes:\r\n    - ReadWriteMany\r\n  hostPath:\r\n    path: \"\/mnt\/data\"\r\n---\r\n\r\napiVersion: v1\r\nkind: PersistentVolumeClaim\r\nmetadata:\r\n  name: 1312-pvc\r\n  namespace: ckad-1312\r\nspec:\r\n  storageClassName: manual\r\n  accessModes:\r\n    - ReadWriteMany\r\n  resources:\r\n    requests:\r\n      storage: 1Gi\r\n---\r\n\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  name: 1312-pod\r\n  namespace: ckad-1312\r\nspec:\r\n  volumes:\r\n    - name: task-pv-storage\r\n      persistentVolumeClaim:\r\n        claimName: 1312-pvc\r\n  containers:\r\n    - name: task-pv-container\r\n      image: nginx\r\n      ports:\r\n        - containerPort: 80\r\n          name: \"http-server\"\r\n      volumeMounts:\r\n        - mountPath: \"\/webdata\"\r\n          name: task-pv-storage\r\n\r\n\r\n[root@controller ~]# kubectl create ns ckad-1312\r\nnamespace\/ckad-1312 created\r\n\r\n[root@controller ~]# kubectl create -f task1512.yaml\r\npersistentvolume\/1312-pv created\r\npersistentvolumeclaim\/1312-pvc created\r\npod\/1312-pod created\r\n\r\n[root@controller ~]# kubectl get pods,pvc,pv -n ckad-1312\r\nNAME           READY   STATUS    RESTARTS   AGE\r\npod\/1312-pod   1\/1     Running   0          76s\r\n\r\nNAME                             STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE\r\npersistentvolumeclaim\/1312-pvc   Bound 
   1312-pv   2Gi        RWX            manual         76s\r\n\r\nNAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS     REASON   AGE\r\npersistentvolume\/1312-pv   2Gi        RWX            Retain           Bound    ckad-1312\/1312-pvc   manual                    76s\r\n\r\n[root@controller ~]# kubectl exec -n ckad-1312 -it 1312-pod -- touch \/webdata\/testfile\r\n\r\n[root@controller ~]# ls \/mnt\/data\r\nls: nie ma dost\u0119pu do '\/mnt\/data': Nie ma takiego pliku ani katalogu\r\n\r\n[root@controller ~]# kubectl get pods -n ckad-1312 -o wide\r\nNAME       READY   STATUS    RESTARTS   AGE   IP              NODE                  NOMINATED NODE   READINESS GATES\r\n1312-pod   1\/1     Running   0          69m   172.16.71.247   worker2.example.com   &lt;none&gt;           &lt;none&gt;\r\n\r\n[root@controller ~]# ssh root@worker2\r\nLast login: Sat Mar  9 02:20:36 2024 from 10.8.152.84\r\n\r\n[root@worker2 ~]# ls \/mnt\/data\r\ntestfile\r\n\r\n[root@worker2 ~]# wylogowanie\r\nConnection to worker2 closed.\r\n\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Using Quota<\/span><\/p>\n<p>Create a Namespace with the name limited, in which 5 Pods can be started and a total amount of 1000 millicore and 2 GiB of RAM is available.<\/p>\n<p>Run a Deployment with the name restrict-nginx in this Namespace, with 3 Pods where every Pod initially requests 64 MiB RAM, with an upper limit of 256 MiB<br \/>\nRAM.<\/p>\n<p>1000 millicore is 1 cpu.<\/p>\n<pre class=\"lang:default decode:true \">[root@controller ~]# kubectl create ns limited\r\n[root@controller ~]# kubectl create quota -h | more\r\nCreate a resource quota with the specified name, hard limits, and optional scopes.\r\n...\r\n  # Create a new resource quota named my-quota\r\n  kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10\r\n...\r\n[root@controller 
~]# # kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10\r\n[root@controller ~]# kubectl create quota limitedquota -n limited --hard=cpu=1,memory=2G,pods=5\r\nresourcequota\/limitedquota created\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     limitedquota\r\n  Resource  Used  Hard\r\n  --------  ---   ---\r\n  cpu       0     1\r\n  memory    0     2G\r\n  pods      0     5\r\n\r\nNo LimitRange resource.\r\n\r\n\r\n[root@controller ~]# kubectl create deploy restrict-nginx --replicas=3 --image=nginx -n limited\r\ndeployment.apps\/restrict-nginx created\r\n\r\n[root@controller ~]# kubectl set resources -h | more\r\n...\r\n  # Set the resource request and limits for all containers in nginx\r\n  kubectl set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi\r\n...\r\n[root@controller ~]# # kubectl set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi\r\n[root@controller ~]# kubectl set resources deployment restrict-nginx --limits=memory=256Mi --requests=memory=64Mi -n limited\r\ndeployment.apps\/restrict-nginx resource requirements updated\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     limitedquota\r\n  Resource  Used  Hard\r\n  --------  ---   ---\r\n  cpu       0     1\r\n  memory    0     2G\r\n  pods      0     5\r\n\r\nNo LimitRange resource.\r\n[root@controller ~]# kubectl get all -n limited\r\nNAME                             READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/restrict-nginx   0\/3     0            0           
3m25s\r\n\r\nNAME                                        DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/restrict-nginx-67f4786c7    1         0         0       61s\r\nreplicaset.apps\/restrict-nginx-857b64fb78   3         0         0       3m25s\r\n\r\n[root@controller ~]# kubectl describe replicaset.apps\/restrict-nginx-857b64fb78 -n limited\r\nName:           restrict-nginx-857b64fb78\r\nNamespace:      limited\r\nSelector:       app=restrict-nginx,pod-template-hash=857b64fb78\r\nLabels:         app=restrict-nginx\r\n                pod-template-hash=857b64fb78\r\nAnnotations:    deployment.kubernetes.io\/desired-replicas: 3\r\n                deployment.kubernetes.io\/max-replicas: 4\r\n                deployment.kubernetes.io\/revision: 1\r\nControlled By:  Deployment\/restrict-nginx\r\nReplicas:       0 current \/ 3 desired\r\nPods Status:    0 Running \/ 0 Waiting \/ 0 Succeeded \/ 0 Failed\r\nPod Template:\r\n  Labels:  app=restrict-nginx\r\n           pod-template-hash=857b64fb78\r\n  Containers:\r\n   nginx:\r\n    Image:        nginx\r\n    Port:         &lt;none&gt;\r\n    Host Port:    &lt;none&gt;\r\n    Environment:  &lt;none&gt;\r\n    Mounts:       &lt;none&gt;\r\n  Volumes:        &lt;none&gt;\r\nConditions:\r\n  Type             Status  Reason\r\n  ----             ------  ------\r\n  ReplicaFailure   True    FailedCreate\r\nEvents:\r\n  Type     Reason        Age                  From                   Message\r\n  ----     ------        ----                 ----                   -------\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-ttdnd\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-xzbqr\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  
FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-hjnvg\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-khf8q\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-q6bg4\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-qvmps\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m20s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-x4jnp\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m19s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-n8f5c\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  4m19s                replicaset-controller  Error creating: pods \"restrict-nginx-857b64fb78-hfd27\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n  Warning  FailedCreate  96s (x7 over 4m17s)  replicaset-controller  (combined from similar events): Error creating: pods \"restrict-nginx-857b64fb78-gfnhk\" is forbidden: failed quota: limitedquota: must specify cpu for: nginx; memory for: nginx\r\n[root@controller ~]#\r\n[root@controller ~]# # kubectl set resources deployment nginx --limits=cpu=1 --requests=cpu=1 -n limited\r\n\r\n[root@controller ~]# kubectl set resources deployment nginx --limits=cpu=1 --requests=cpu=1 -n 
limited\r\nError from server (NotFound): deployments.apps \"nginx\" not found\r\n\r\n[root@controller ~]# kubectl set resources deployment restrict-nginx --limits=cpu=1 --requests=cpu=1 -n limited\r\ndeployment.apps\/restrict-nginx resource requirements updated\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     limitedquota\r\n  Resource  Used  Hard\r\n  --------  ---   ---\r\n  cpu       1     1\r\n  memory    64Mi  2G\r\n  pods      1     5\r\n\r\n\r\n[root@controller ~]# kubectl set resources -h | more \r\n... \r\n# Set the resource request and limits for all containers in nginx \r\nkubectl set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi \r\n...\r\n\r\n[root@controller ~]# kubectl create deploy restrict-nginx --replicas=3 --image=nginx -n limited\r\ndeployment.apps\/restrict-nginx created\r\n\r\n[root@controller ~]# kubectl set resources deployment restrict-nginx --limits=cpu=200m,memory=256Mi --requests=cpu=200m,memory=64Mi -n limited\r\ndeployment.apps\/restrict-nginx resource requirements updated\r\n\r\n[root@controller ~]# kubectl describe ns limited\r\nName:         limited\r\nLabels:       kubernetes.io\/metadata.name=limited\r\nAnnotations:  &lt;none&gt;\r\nStatus:       Active\r\n\r\nResource Quotas\r\n  Name:     limitedquota\r\n  Resource  Used   Hard\r\n  --------  ---    ---\r\n  cpu       600m   1\r\n  memory    192Mi  2G\r\n  pods      3      5\r\n\r\nNo LimitRange resource.\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Creating Canary Deployments<\/span><\/p>\n<p>Run a Deployment with the name myweb, using the nginx:1.14 image and 3<br \/>\nreplicas. 
Ensure this Deployment is accessible through a Service with the name<br \/>\ncanary, which uses the NodePort Service type.<\/p>\n<p>Update the Deployment to the latest version of Nginx, using the canary<br \/>\nDeployment update strategy, in such a way that 40% of the application offers<br \/>\naccess to the updated application and 60% still uses the old application.<\/p>\n<pre class=\"lang:default decode:true\">[root@controller ~]# kubectl create deploy myweb-old --image=nginx:1.14 --replicas=3\r\ndeployment.apps\/myweb-old created\r\n[root@controller ~]# kubectl edit deploy myweb-old\r\n<\/pre>\n<p>Add label type: canary:<\/p>\n<pre class=\"lang:default decode:true\">  labels:\r\n    app: myweb-old\r\n  name: myweb-old\r\n  namespace: default\r\n  resourceVersion: \"1249762\"\r\n  uid: b42e79fa-a7a2-4b5c-b3ee-f6cccd3b7d1b\r\nspec:\r\n  progressDeadlineSeconds: 600\r\n  replicas: 3\r\n  revisionHistoryLimit: 10\r\n  selector:\r\n    matchLabels:\r\n      app: myweb-old\r\n  strategy:\r\n    rollingUpdate:\r\n      maxSurge: 25%\r\n      maxUnavailable: 25%\r\n    type: RollingUpdate\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: myweb-old\r\n<\/pre>\n<p>To:<\/p>\n<pre class=\"lang:default mark:3,25 decode:true\">  labels:\r\n    app: myweb-old\r\n    type: canary\r\n  name: myweb-old\r\n  namespace: default\r\n  resourceVersion: \"1249762\"\r\n  uid: b42e79fa-a7a2-4b5c-b3ee-f6cccd3b7d1b\r\nspec:\r\n  progressDeadlineSeconds: 600\r\n  replicas: 3\r\n  revisionHistoryLimit: 10\r\n  selector:\r\n    matchLabels:\r\n      app: myweb-old\r\n  strategy:\r\n    rollingUpdate:\r\n      maxSurge: 25%\r\n      maxUnavailable: 25%\r\n    type: RollingUpdate\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: myweb-old\r\n        type: canary\r\n<\/pre>\n<p>And now:<\/p>\n<pre class=\"lang:default mark:32 decode:true\">[root@controller ~]# kubectl get all --selector type=canary\r\nNAME    
                   READY   STATUS    RESTARTS   AGE\r\npod\/myweb-old-c6d768fc-h8pf6   1\/1     Running   0          5m3s\r\npod\/myweb-old-c6d768fc-rcfnx   1\/1     Running   0          5m6s\r\npod\/myweb-old-c6d768fc-srs7g   1\/1     Running   0          5m5s\r\n\r\nNAME                    READY   UP-TO-DATE   AVAILABLE   AGE\r\ndeployment.apps\/myweb-old   3\/3     3            3           18m\r\n\r\nNAME                             DESIRED   CURRENT   READY   AGE\r\nreplicaset.apps\/myweb-old-c6d768fc   3         3         3       5m6s\r\n\r\n\r\n[root@controller ~]# kubectl expose deploy myweb-old --name=myweb --selector type=canary --port=80\r\nservice\/myweb exposed\r\n\r\n\r\n[root@controller ~]# kubectl describe svc myweb\r\nName:              myweb\r\nNamespace:         default\r\nLabels:            app=myweb-old\r\n                   type=canary\r\nAnnotations:       &lt;none&gt;\r\nSelector:          type=canary\r\nType:              ClusterIP\r\nIP Family Policy:  SingleStack\r\nIP Families:       IPv4\r\nIP:                10.105.145.29\r\nIPs:               10.105.145.29\r\nPort:              &lt;unset&gt;  80\/TCP\r\nTargetPort:        80\/TCP\r\nEndpoints:         172.16.102.182:80,172.16.71.251:80,172.16.71.252:80\r\nSession Affinity:  None\r\nEvents:            &lt;none&gt;\r\n\r\n[root@controller ~]# kubectl create deploy myweb-new --image=nginx --replicas=2\r\ndeployment.apps\/myweb-new created\r\n\r\n[root@controller ~]# kubectl edit  deploy myweb-new\r\n<\/pre>\n<p>Add type: canary label:<\/p>\n<pre class=\"lang:default decode:true\">  labels:\r\n    app: myweb-new\r\n  name: myweb-new\r\n  namespace: default\r\n  resourceVersion: \"1252010\"\r\n  uid: ff3655ab-dbae-41ae-a92d-05e4b488e3f4\r\nspec:\r\n  progressDeadlineSeconds: 600\r\n  replicas: 2\r\n  revisionHistoryLimit: 10\r\n  selector:\r\n    matchLabels:\r\n      app: myweb-new\r\n  strategy:\r\n    rollingUpdate:\r\n      maxSurge: 25%\r\n      maxUnavailable: 25%\r\n    type: 
RollingUpdate\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: myweb-new\r\n<\/pre>\n<p>To:<\/p>\n<pre class=\"lang:default mark:3,25 decode:true\">  labels:\r\n    app: myweb-new\r\n    type: canary\r\n  name: myweb-new\r\n  namespace: default\r\n  resourceVersion: \"1252010\"\r\n  uid: ff3655ab-dbae-41ae-a92d-05e4b488e3f4\r\nspec:\r\n  progressDeadlineSeconds: 600\r\n  replicas: 2\r\n  revisionHistoryLimit: 10\r\n  selector:\r\n    matchLabels:\r\n      app: myweb-new\r\n  strategy:\r\n    rollingUpdate:\r\n      maxSurge: 25%\r\n      maxUnavailable: 25%\r\n    type: RollingUpdate\r\n  template:\r\n    metadata:\r\n      creationTimestamp: null\r\n      labels:\r\n        app: myweb-new\r\n        type: canary\r\n<\/pre>\n<p>And now we have three replicas in the old version and two replicas in the new version:<\/p>\n<pre class=\"lang:default mark:15,23 decode:true\">[root@controller ~]# kubectl describe svc myweb\r\nName:              myweb\r\nNamespace:         default\r\nLabels:            app=myweb\r\n                   type=canary\r\nAnnotations:       &lt;none&gt;\r\nSelector:          type=canary\r\nType:              ClusterIP\r\nIP Family Policy:  SingleStack\r\nIP Families:       IPv4\r\nIP:                10.105.145.29\r\nIPs:               10.105.145.29\r\nPort:              &lt;unset&gt;  80\/TCP\r\nTargetPort:        80\/TCP\r\nEndpoints:         172.16.102.182:80,172.16.102.184:80,172.16.71.251:80 + 2 more...\r\nSession Affinity:  None\r\nEvents:            &lt;none&gt;\r\n\r\n\r\n[root@controller ~]# kubectl get endpoints\r\nNAME         ENDPOINTS                                                          AGE\r\nkubernetes   172.30.9.25:6443                                                   9d\r\nmyweb        172.16.102.182:80,172.16.102.184:80,172.16.71.251:80 + 2 more...   
15m\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: #3366ff;\">Managing Pod Permissions<\/span><\/p>\n<p>Create a Pod manifest file to run a Pod with the name sleepybox. It should run<br \/>\nthe latest version of busybox, with the sleep 3600 command as the default<br \/>\ncommand. Ensure the primary Pod user is a member of the supplementary<br \/>\ngroup 2000 while this Pod is started.<\/p>\n<pre class=\"lang:default mark:50-51 decode:true\">[root@controller ~]# kubectl run sleepybox --image=busybox --dry-run=client -o yaml -- sleep 3600 &gt; task1515.yaml\r\n[root@controller ~]# cat task1515.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: sleepybox\r\n  name: sleepybox\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    name: sleepybox\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n\r\n[root@controller ~]# kubectl explain pod.spec.securityContext\r\n..\r\nFIELDS:\r\n  fsGroup       &lt;integer&gt;\r\n    A special supplemental group that applies to all containers in a pod. 
Some\r\n    volume types allow the Kubelet to change the ownership of that volume to be\r\n    owned by the pod:\r\n...\r\n\r\n[root@controller ~]# vim task1515.yaml\r\n[root@controller ~]# cat task1515.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: sleepybox\r\n  name: sleepybox\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    name: sleepybox\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  securityContext:\r\n    fsGroup: 2000\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl create -f task1515.yaml\r\npod\/sleepybox created\r\n\r\n[root@controller ~]# kubectl get pods sleepybox -o yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  annotations:\r\n    cni.projectcalico.org\/containerID: 81507fea72e2a315dd543f6103689a7db6d41d2bf0d4dc6645d97d0919d37fc9\r\n    cni.projectcalico.org\/podIP: 172.16.71.255\/32\r\n    cni.projectcalico.org\/podIPs: 172.16.71.255\/32\r\n  creationTimestamp: \"2024-03-09T21:16:57Z\"\r\n  labels:\r\n    run: sleepybox\r\n  name: sleepybox\r\n  namespace: default\r\n  resourceVersion: \"1254387\"\r\n  uid: ba0a9671-41f4-4fd6-89b9-032ecd44c3d0\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    imagePullPolicy: Always\r\n    name: sleepybox\r\n    resources: {}\r\n    terminationMessagePath: \/dev\/termination-log\r\n    terminationMessagePolicy: File\r\n    volumeMounts:\r\n    - mountPath: \/var\/run\/secrets\/kubernetes.io\/serviceaccount\r\n      name: kube-api-access-cmnjt\r\n      readOnly: true\r\n  dnsPolicy: ClusterFirst\r\n  enableServiceLinks: true\r\n  nodeName: worker2.example.com\r\n  preemptionPolicy: PreemptLowerPriority\r\n  priority: 0\r\n  restartPolicy: Always\r\n  schedulerName: default-scheduler\r\n  securityContext:\r\n    fsGroup: 2000\r\n  serviceAccount: default\r\n...\r\n<\/pre>\n<p>&nbsp;<\/p>\n<p><span style=\"color: 
#3366ff;\">Using a ServiceAccount<\/span><\/p>\n<p>Create a Pod with the name allaccess. Also create a ServiceAccount with the<br \/>\nname allaccess and ensure that the Pod is using the ServiceAccount. Notice<br \/>\nthat no further RBAC setup is required.<\/p>\n<pre class=\"lang:default mark:43,44,90,91 decode:true\">[root@controller ~]# kubectl create sa allaccess\r\nserviceaccount\/allaccess created\r\n\r\n[root@controller ~]# kubectl run allaccess --image=busybox --dry-run=client -o yaml -- sleep 3600 &gt; task1516.yaml\r\n\r\n[root@controller ~]# cat task1516.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: allaccess\r\n  name: allaccess\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    name: allaccess\r\n    resources: {}\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# vi task1516.yaml\r\n[root@controller ~]# cat task1516.yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  creationTimestamp: null\r\n  labels:\r\n    run: allaccess\r\n  name: allaccess\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    name: allaccess\r\n    resources: {}\r\n  serviceAccount: allaccess\r\n  serviceAccountName: allaccess\r\n  dnsPolicy: ClusterFirst\r\n  restartPolicy: Always\r\nstatus: {}\r\n\r\n[root@controller ~]# kubectl create -f task1516.yaml\r\npod\/allaccess created\r\n\r\n[root@controller ~]# kubectl get pods -o yaml allaccess\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n  annotations:\r\n    cni.projectcalico.org\/containerID: 9c902c989429e833cb4caa1e841a265d701656a61dd6fb1db4f7a1ff9677398b\r\n    cni.projectcalico.org\/podIP: 172.16.71.194\/32\r\n    cni.projectcalico.org\/podIPs: 172.16.71.194\/32\r\n  creationTimestamp: \"2024-03-09T21:33:20Z\"\r\n  labels:\r\n    run: allaccess\r\n  name: allaccess\r\n  namespace: default\r\n  resourceVersion: 
\"1255980\"\r\n  uid: c1ef23d1-df96-4398-b7fb-c4b61567dfa1\r\nspec:\r\n  containers:\r\n  - args:\r\n    - sleep\r\n    - \"3600\"\r\n    image: busybox\r\n    imagePullPolicy: Always\r\n    name: allaccess\r\n    resources: {}\r\n    terminationMessagePath: \/dev\/termination-log\r\n    terminationMessagePolicy: File\r\n    volumeMounts:\r\n    - mountPath: \/var\/run\/secrets\/kubernetes.io\/serviceaccount\r\n      name: kube-api-access-bmz7l\r\n      readOnly: true\r\n  dnsPolicy: ClusterFirst\r\n  enableServiceLinks: true\r\n  nodeName: worker2.example.com\r\n  preemptionPolicy: PreemptLowerPriority\r\n  priority: 0\r\n  restartPolicy: Always\r\n  schedulerName: default-scheduler\r\n  securityContext: {}\r\n  serviceAccount: allaccess\r\n  serviceAccountName: allaccess\r\n...\r\n<\/pre>\n<p>&nbsp;<\/p>\n","protected":false},"excerpt":{"rendered":"","protected":false},"author":1,"featured_media":5943,"comment_status":"closed","ping_status":"closed","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[99],"tags":[],"_links":{"self":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5665"}],"collection":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/comments?post=5665"}],"version-history":[{"count":54,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5665\/revisions"}],"predecessor-version":[{"id":5945,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/posts\/5665\/revisions\/5945"}],"wp:featuredmedia":[{"embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/media\/5943"}],"wp:attachment":[{"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-
json\/wp\/v2\/media?parent=5665"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/categories?post=5665"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/miro.borodziuk.eu\/index.php\/wp-json\/wp\/v2\/tags?post=5665"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}