kubernetes

[따배쿠] 로그 관리

bbiyak2da 2025. 1. 8. 13:39

Pod 로그 관리

로그 활용법

  • 로그 수집 -> 로그 정제 -> 로그 보존 -> 분류 후 시각화

 

개별 Pod의 로그 확인

  • kubectl logs <pod name>

 

실습

 

# pod 생성 (deployment 이용)

root@master:~# kubectl create deployment my-nginx --image nginx:1.14 --port 80 --replicas 2
deployment.apps/my-nginx created

 

# pod 확인

root@master:~/Getting-Start-Kubernetes/17# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE     IP                NODE    NOMINATED NODE   READINESS GATES
my-nginx-587f57b444-ffpxq   1/1     Running   0          3m28s   192.168.166.132   node1   <none>           <none>
my-nginx-587f57b444-glhq7   1/1     Running   0          3m28s   192.168.104.4     node2   <none>           <none>

 

# 테스트 pod 생성 및 확인

root@master:~# kubectl run testpod --image=nginx
root@master:~# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE    IP                NODE    NOMINATED NODE   READINESS GATES
my-nginx-587f57b444-ffpxq   1/1     Running   0          100m   192.168.166.132   node1   <none>           <none>
my-nginx-587f57b444-glhq7   1/1     Running   0          100m   192.168.104.4     node2   <none>           <none>
testpod                     1/1     Running   0          19m    192.168.104.5     node2   <none>           <none>

 

# 테스트 pod에서 다른 pod로 접속

root@testpod:/# curl 192.168.104.4
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

 

# pod의 로그 확인

root@master:~# kubectl logs my-nginx-587f57b444-glhq7
192.168.104.5 - - [08/Jan/2025:06:06:45 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.88.1" "-"
192.168.104.5 - - [08/Jan/2025:06:14:13 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.88.1" "-"

 

두 Pod들의 단일 진입점 Service를 만들어보자.

 

# Service 생성

root@master:~# kubectl expose deployment my-nginx --port 80 --target-port 80
root@master:~# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP   3h50m
my-nginx     ClusterIP   10.110.215.206   <none>        80/TCP    74m

 

EFK를 활용한 K8S 애플리케이션 로그 관리

 

1. 각 Node에 있는 fluentd가 daemonset으로 log를 수집

2. ElasticSearch는 fluentd가 수집한 로그를 스토리지에 저장

3. 저장된 로그를 kibana로 시각화

 

* ElasticSearch : 로그 저장 / Fluentd : 로그 수집 및 정제 / Kibana : 로그 시각화

 

ElasticSearch 구성

 

1. elasticsearch 마스터 노드 생성

 

# namespace 생성

root@master:~/efk# vi namespace.yaml
# Namespace that holds every EFK (Elasticsearch / Fluentd / Kibana) component.
apiVersion: v1
kind: Namespace
metadata:
    name: kube-logging
root@master:~/efk# kubectl apply -f namespace.yaml
namespace/kube-logging created

 

# namespace 확인

root@master:~/efk# kubectl get namespaces
NAME              STATUS   AGE
default           Active   5h34m
kube-logging      Active   4s
kube-node-lease   Active   5h34m
kube-public       Active   5h34m
kube-system       Active   5h34m

 

 

[elasticsearch-master-configmap.yaml]

root@master:~/efk# vi elasticsearch-master-configmap.yaml
---
# ConfigMap carrying elasticsearch.yml for the dedicated master node.
# The ${...} placeholders are expanded by Elasticsearch from the container
# environment variables defined in the elasticsearch-master Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: kube-logging
  name: elasticsearch-master-config
  labels:
    app: elasticsearch
    role: master
# node.master=true / data=false / ingest=false -> master-only role
# (legacy ES 7.x node role syntax, matching the 7.3.0 image used below).
data:
  elasticsearch.yml: |-
    cluster.name: ${CLUSTER_NAME}
    node.name: ${NODE_NAME}
    discovery.seed_hosts: ${NODE_LIST}
    cluster.initial_master_nodes: ${MASTER_NODES}
    network.host: 0.0.0.0
    node:
      master: true
      data: false
      ingest: false
    xpack.security.enabled: true
    xpack.monitoring.collection.enabled: true
---
root@master:~/efk# kubectl apply -f elasticsearch-master-configmap.yaml
configmap/elasticsearch-master-config created

 

[elasticsearch-master-service.yaml]

root@master:~/efk# vi elasticsearch-master-service.yaml
---
# ClusterIP Service for the master node, exposing only the transport port
# (9300) used for inter-node cluster communication.
apiVersion: v1
kind: Service
metadata:
  namespace: kube-logging
  name: elasticsearch-master
  labels:
    app: elasticsearch
    role: master
spec:
  ports:
  - port: 9300
    name: transport
  # Targets pods labeled app=elasticsearch, role=master (see the Deployment).
  selector:
    app: elasticsearch
    role: master
---

 

elasticsearch는 노드 간 통신(transport)을 위해 9300 포트로 작동한다.

root@master:~/efk# kubectl apply -f elasticsearch-master-service.yaml
service/elasticsearch-master created

 

[elasticsearch-master-deployment.yaml]

root@master:~/efk# vi elasticsearch-master-deployment.yaml
---
# Deployment for the dedicated Elasticsearch master node (single replica).
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: kube-logging
  name: elasticsearch-master
  labels:
    app: elasticsearch
    role: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
      role: master
  template:
    metadata:
      labels:
        app: elasticsearch
        role: master
    spec:
      containers:
      - name: elasticsearch-master
        image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
        # These env vars fill the ${...} placeholders in elasticsearch.yml
        # supplied by the elasticsearch-master-config ConfigMap.
        env:
        - name: CLUSTER_NAME
          value: elasticsearch
        - name: NODE_NAME
          value: elasticsearch-master
        - name: NODE_LIST
          value: elasticsearch-master,elasticsearch-data,elasticsearch-client
        - name: MASTER_NODES
          value: elasticsearch-master
        # Small JVM heap, suitable only for a lab environment.
        - name: "ES_JAVA_OPTS"
          value: "-Xms256m -Xmx256m"
        ports:
        - containerPort: 9300
          name: transport
        volumeMounts:
        # subPath mounts just elasticsearch.yml, leaving the rest of the
        # image's config directory intact.
        - name: config
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          readOnly: true
          subPath: elasticsearch.yml
        - name: storage
          mountPath: /data
      volumes:
      - name: config
        configMap:
          name: elasticsearch-master-config
      # emptyDir: contents are lost on pod rescheduling (acceptable for a
      # master-only node that stores no index data).
      - name: "storage"
        emptyDir:
          medium: ""
      initContainers:
      # Elasticsearch requires the host's vm.max_map_count to be >= 262144;
      # a privileged init container raises it before the main container starts.
      - name: increase-vm-max-map
        image: busybox
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
---
root@master:~/efk# kubectl apply -f elasticsearch-master-deployment.yaml
deployment.apps/elasticsearch-master created

 

 

# 확인

root@master:~/efk# kubectl get pods -n kube-logging
NAME                                    READY   STATUS    RESTARTS   AGE
elasticsearch-master-8459b8fc8c-hb946   1/1     Running   0          2m6s

 


 

2. ElasticSearch 데이터 노드 생성

 

elasticsearch가 데이터를 저장할 수 있도록 데이터 노드를 생성해보자

 

[pv.yaml]

root@master:~/efk# vi pv.yaml
# PersistentVolume backing the Elasticsearch data node's claim.
# NOTE: the Recycle reclaim policy is deprecated (kubectl prints a warning
# on apply); dynamic provisioning is the recommended replacement.
# hostPath is node-local storage — fine for a lab, not for production.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: gp2
  hostPath:
    path: /logdata

 

elasticsearch가 사용할 pv를 만들어준다.

 

root@master:~/efk# kubectl apply -f pv.yaml
Warning: spec.persistentVolumeReclaimPolicy: The Recycle reclaim policy is deprecated. Instead, the recommended approach is to use dynamic provisioning.
persistentvolume/pv1 created

 

 

[elasticsearch-data-configmap.yaml]

root@master:~/efk# vi elasticsearch-data-configmap.yaml
---
# ConfigMap carrying elasticsearch.yml for the data node.
# The ${...} placeholders are expanded by Elasticsearch from the container
# environment variables defined in the elasticsearch-data StatefulSet.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: kube-logging
  name: elasticsearch-data-config
  labels:
    app: elasticsearch
    role: data
# node.master=false / data=true / ingest=false -> data-only role.
data:
  elasticsearch.yml: |-
    cluster.name: ${CLUSTER_NAME}
    node.name: ${NODE_NAME}
    discovery.seed_hosts: ${NODE_LIST}
    cluster.initial_master_nodes: ${MASTER_NODES}
    network.host: 0.0.0.0
    node:
      master: false
      data: true
      ingest: false
    xpack.security.enabled: true
    xpack.monitoring.collection.enabled: true
---
root@master:~/efk# kubectl apply -f elasticsearch-data-configmap.yaml
configmap/elasticsearch-data-config created

 

 

[elasticsearch-data-service.yaml]

root@master:~/efk# vi elasticsearch-data-service.yaml
---
# Service for the data node; also referenced as serviceName by the
# elasticsearch-data StatefulSet for stable pod DNS.
apiVersion: v1
kind: Service
metadata:
  namespace: kube-logging
  name: elasticsearch-data
  labels:
    app: elasticsearch
    role: data
spec:
  ports:
  - port: 9300
    name: transport
  # Targets pods labeled app=elasticsearch, role=data.
  selector:
    app: elasticsearch
    role: data
---
root@master:~/efk# kubectl apply -f elasticsearch-data-service.yaml
service/elasticsearch-data created

 

[elasticsearch-data-statefulset.yaml]

root@master:~/efk# vi elasticsearch-data-statefulset.yaml
---
# StatefulSet for the Elasticsearch data node, with persistent storage
# provisioned through a volumeClaimTemplate.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: kube-logging
  name: elasticsearch-data
  labels:
    app: elasticsearch
    role: data
spec:
  serviceName: "elasticsearch-data"
  selector:
    matchLabels:
      # Must be app=elasticsearch (not app=elasticsearch-data): the
      # elasticsearch-data Service selects app=elasticsearch, role=data,
      # and the master/client manifests use the same label convention.
      # With the mismatched label the Service would match no pods.
      app: elasticsearch
      role: data
  replicas: 1
  template:
    metadata:
      labels:
        app: elasticsearch
        role: data
    spec:
      containers:
      - name: elasticsearch-data
        image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
        # These env vars fill the ${...} placeholders in elasticsearch.yml
        # supplied by the elasticsearch-data-config ConfigMap.
        env:
        - name: CLUSTER_NAME
          value: elasticsearch
        - name: NODE_NAME
          value: elasticsearch-data
        - name: NODE_LIST
          value: elasticsearch-master,elasticsearch-data,elasticsearch-client
        - name: MASTER_NODES
          value: elasticsearch-master
        - name: "ES_JAVA_OPTS"
          value: "-Xms300m -Xmx300m"
        ports:
        - containerPort: 9300
          name: transport
        volumeMounts:
        - name: config
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          readOnly: true
          subPath: elasticsearch.yml
        # NOTE(review): ES stores indices under /usr/share/elasticsearch/data
        # by default; mounting at /data/db only persists index data if
        # path.data is pointed there — confirm against the config.
        - name: elasticsearch-data-persistent-storage
          mountPath: /data/db
      volumes:
      - name: config
        configMap:
          name: elasticsearch-data-config
      initContainers:
      # Elasticsearch requires the host's vm.max_map_count to be >= 262144.
      - name: increase-vm-max-map
        image: busybox
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
  volumeClaimTemplates:
  - metadata:
      name: elasticsearch-data-persistent-storage
      annotations:
        volume.beta.kubernetes.io/storage-class: "gp2"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      # Must match the PV (pv1, storageClassName: gp2) for the claim to
      # bind; requesting class "standard" would never bind to that PV.
      # The transcript shows the claim Bound to the gp2 PV.
      storageClassName: gp2
      resources:
        requests:
          storage: 10Gi
---
root@master:~/efk# kubectl apply -f elasticsearch-data-statefulset.yaml
statefulset.apps/elasticsearch-data created

 

# 확인

root@master:~/efk# kubectl get pods -n kube-logging
NAME                                    READY   STATUS    RESTARTS   AGE
elasticsearch-data-0                    1/1     Running   0          10m
elasticsearch-master-8459b8fc8c-hb946   1/1     Running   0          24m
root@master:~/efk# kubectl get pv
NAME   CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                                                                     STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pv1    10Gi       RWO,ROX        Recycle          Bound    kube-logging/elasticsearch-data-persistent-storage-elasticsearch-data-0   gp2            <unset>                          3m32s

 

[참고 문서]

https://waspro.tistory.com/762


 

3. ElasticSearch 클라이언트 노드 생성

 

ElasticSearch로 수집된 로그를 마스터에 저장할 수 있도록 하는 클라이언트를 생성해보자

 

[elasticsearch-client-configmap.yaml]

---
# ConfigMap carrying elasticsearch.yml for the client (coordinating/ingest)
# node. The ${...} placeholders are expanded by Elasticsearch from the
# environment variables defined in the elasticsearch-client Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: kube-logging
  name: elasticsearch-client-config
  labels:
    app: elasticsearch
    role: client
# node.master=false / data=false / ingest=true -> ingest-only role.
data:
  elasticsearch.yml: |-
    cluster.name: ${CLUSTER_NAME}
    node.name: ${NODE_NAME}
    discovery.seed_hosts: ${NODE_LIST}
    cluster.initial_master_nodes: ${MASTER_NODES}
    network.host: 0.0.0.0
    node:
      master: false
      data: false
      ingest: true
    xpack.security.enabled: true
    xpack.monitoring.collection.enabled: true
---
root@master:~/efk# kubectl apply -f elasticsearch-client-configmap.yaml
configmap/elasticsearch-client-config created

 

 

[elasticsearch-client-service.yaml]

root@master:~/efk# vi elasticsearch-client-service.yaml
---
# Service for the client node: exposes both the REST API port (9200) and
# the inter-node transport port (9300).
apiVersion: v1
kind: Service
metadata:
  namespace: kube-logging
  name: elasticsearch-client
  labels:
    app: elasticsearch
    role: client
spec:
  ports:
  - port: 9200
    name: client
  - port: 9300
    name: transport
  # Targets pods labeled app=elasticsearch, role=client.
  selector:
    app: elasticsearch
    role: client
---
root@master:~/efk# kubectl apply -f elasticsearch-client-service.yaml
service/elasticsearch-client created

 

[elasticsearch-client-deployment.yaml]

root@master:~/efk# vi elasticsearch-client-deployment.yaml
---
# Deployment for the Elasticsearch client (coordinating/ingest) node.
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: kube-logging
  name: elasticsearch-client
  labels:
    app: elasticsearch
    role: client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: elasticsearch
      role: client
  template:
    metadata:
      labels:
        app: elasticsearch
        role: client
    spec:
      containers:
      - name: elasticsearch-client
        image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
        # These env vars fill the ${...} placeholders in elasticsearch.yml
        # supplied by the elasticsearch-client-config ConfigMap.
        env:
        - name: CLUSTER_NAME
          value: elasticsearch
        - name: NODE_NAME
          value: elasticsearch-client
        - name: NODE_LIST
          value: elasticsearch-master,elasticsearch-data,elasticsearch-client
        - name: MASTER_NODES
          value: elasticsearch-master
        - name: "ES_JAVA_OPTS"
          value: "-Xms256m -Xmx256m"
        ports:
        # 9200 = REST API (used by Fluentd/Kibana), 9300 = node transport.
        - containerPort: 9200
          name: client
        - containerPort: 9300
          name: transport
        volumeMounts:
        # subPath mounts just elasticsearch.yml, leaving the rest of the
        # image's config directory intact.
        - name: config
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          readOnly: true
          subPath: elasticsearch.yml
        - name: storage
          mountPath: /data
      volumes:
      - name: config
        configMap:
          name: elasticsearch-client-config
      # emptyDir: ephemeral scratch space (client node stores no index data).
      - name: "storage"
        emptyDir:
          medium: ""
      initContainers:
      # Elasticsearch requires the host's vm.max_map_count to be >= 262144.
      - name: increase-vm-max-map
        image: busybox
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
---
root@master:~/efk# kubectl apply -f elasticsearch-client-deployment.yaml
deployment.apps/elasticsearch-client created