bitnami opensearch와 opensearch data-prepper, fluentbit로 springboot 로그 파이프라인을 구성해보자.
wget https://charts.bitnami.com/bitnami/opensearch-1.3.12.tgz
tar -xvf opensearch-1.3.12.tgz
cd opensearch-1.3.12
vi values.yaml
# storageClass를 변경해준다.
helm install opensearch . -f values.yaml   # 현재 디렉토리(차트 루트)를 차트로 지정
openshift의 경우 statefulset의 initContainers에서 securityContext privileged: false와 runAsNonRoot: true 설정이 필요하다.
다음으로 data-prepper를 구성한다.
---
# Single-replica data-prepper Deployment. The two ConfigMap keys are mounted
# as individual files (subPath) at the paths data-prepper reads on startup.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: data-prepper
  labels:
    app: data-prepper
spec:
  replicas: 1
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
  strategy:
    # Recreate: avoid two pods mounting/serving at once during a rollout
    type: Recreate
  selector:
    matchLabels:
      app: data-prepper
  template:
    metadata:
      labels:
        app: data-prepper
    spec:
      volumes:
        - name: prepper-configmap-config
          configMap:
            name: data-prepper-config
            items:
              - key: data-prepper-config.yaml
                path: data-prepper-config.yaml
            defaultMode: 420  # 0644
        - name: prepper-configmap-pipelines
          configMap:
            name: data-prepper-config
            items:
              - key: pipelines.yaml
                path: pipelines.yaml
            defaultMode: 420  # 0644
      containers:
        - name: data-prepper
          image: 'opensearchproject/data-prepper:2'
          imagePullPolicy: IfNotPresent
          ports:
            # HTTP log-ingest source port (matches pipelines.yaml source.http.port)
            - containerPort: 21890
              protocol: TCP
          resources: {}
          volumeMounts:
            # Mounted as single files so the rest of each directory stays intact
            - name: prepper-configmap-config
              mountPath: /usr/share/data-prepper/config/data-prepper-config.yaml
              subPath: data-prepper-config.yaml
            - name: prepper-configmap-pipelines
              mountPath: /usr/share/data-prepper/pipelines/pipelines.yaml
              subPath: pipelines.yaml
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
---
# Headless Service used by data-prepper's peer_forwarder
# (discovery_mode: dns, domain_name: data-prepper-headless).
apiVersion: v1
kind: Service
metadata:
  name: data-prepper-headless
spec:
  # clusterIP: None makes this a true headless Service so the DNS name
  # resolves to the individual pod IPs, which DNS-based peer discovery
  # requires. Without it the name resolves to a single virtual IP.
  clusterIP: None
  type: ClusterIP
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  internalTrafficPolicy: Cluster
  sessionAffinity: None
  ports:
    # HTTP log-ingest port of the data-prepper pipeline source
    - protocol: TCP
      port: 21890
      targetPort: 21890
  selector:
    app: data-prepper
---
# ClusterIP Service exposing data-prepper's core API / metrics endpoint
# (serverPort: 4900 in data-prepper-config.yaml).
apiVersion: v1
kind: Service
metadata:
  name: data-prepper-metrics
spec:
  type: ClusterIP
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  internalTrafficPolicy: Cluster
  sessionAffinity: None
  ports:
    - protocol: TCP
      port: 4900
      targetPort: 4900
  selector:
    app: data-prepper
---
# ConfigMap holding both files mounted into the data-prepper pod:
# - data-prepper-config.yaml: server/peer-forwarder settings
# - pipelines.yaml: the log pipeline (HTTP source -> OpenSearch sink)
apiVersion: v1
kind: ConfigMap
metadata:
  name: data-prepper-config
data:
  data-prepper-config.yaml: |
    ssl: false
    serverPort: 4900
    peer_forwarder:
      discovery_mode: dns
      domain_name: "data-prepper-headless"
  pipelines.yaml: |
    log-pipeline:
      source:
        http:
          path: "/log/ingest"
          port: 21890
      sink:
        - opensearch:
            hosts: ["http://opensearch:9200"]
            index: app-parse # opensearch에서 사용할 index 명
            index_type: custom
            bulk_size: 200
        - stdout:
fluent-bit를 설치해보자. helm chart로 구성한다.
helm repo add fluent https://fluent.github.io/helm-charts
wget https://github.com/fluent/helm-charts/releases/download/fluent-bit-0.47.10/fluent-bit-0.47.10.tgz
(참고: 아래 values.yaml은 bitnami/fluent-bit 이미지와 /opt/bitnami 경로를 쓰는 Bitnami 차트 형식이다. fluent 공식 차트를 쓴다면 키 구조가 다르므로 사용하는 차트의 values 스키마를 확인한다.)
tar -xvf ./fluent-bit-0.47.10.tgz
cd fluent-bit-0.47.10
vi values.yaml
pv에 마운트된 application log를 data prepper로 보낼 수 있도록 OUTPUT을 세팅한다.
affinity: {}
args: []
automountServiceAccountToken: true
autoscaling:
  hpa:
    annotations: {}
    behavior: {}
    customRules: []
    enabled: false
    maxReplicas: 3
    minReplicas: 1
    targetCPUUtilizationPercentage: 75
    targetMemoryUtilizationPercentage: ''
  vpa:
    annotations: {}
    controlledResources: []
    enabled: false
    maxAllowed: {}
    minAllowed: {}
    updatePolicy:
      updateMode: Auto
command: []
commonAnnotations: {}
commonLabels: {}
config:
  # Fluent Bit classic config: keys MUST be indented under their [SECTION]
  # header, so the block scalars below keep that indentation.
  customParsers: |
    [PARSER]
        Name docker_no_time
        Format json
        Time_Keep Off
        Time_Key time
        Time_Format %Y-%m-%dT%H:%M:%S.%L
  extraFiles: {}
  filters: ''
  flush: 1
  # Tail the application access logs from the shared PV mount
  inputs: |
    [INPUT]
        name tail
        tag app_log
        path /tmp/logs/app*/app*/*-access.log
        path_key filepath
        refresh_interval 5
        rotate_wait 5
        mem_buf_limit 10MB
        read_from_head true
  logLevel: debug
  # Ship every record to data-prepper's HTTP source; stdout kept for debugging
  outputs: |
    [OUTPUT]
        Name http
        Match *
        Host data-prepper-headless
        Port 21890
        URI /log/ingest
        Format json
    [OUTPUT]
        Name stdout
        Match *
  service: |
    [SERVICE]
        Flush {{ .Values.config.flush }}
        Daemon Off
        Log_Level {{ .Values.config.logLevel }}
        Config_Watch On
        HTTP_Server On
        HTTP_Listen 0.0.0.0
        HTTP_Port {{ .Values.containerPorts.http }}
        Parsers_File /opt/bitnami/fluent-bit/conf/parsers.conf
        Parsers_File /opt/bitnami/fluent-bit/conf/custom_parsers.conf
  upstream: {}
containerPorts:
  http: 2020
containerSecurityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
  enabled: false
  privileged: false
  readOnlyRootFilesystem: true
  runAsNonRoot: false
  seLinuxOptions: {}
  seccompProfile:
    type: RuntimeDefault
customLivenessProbe: {}
customReadinessProbe: {}
customStartupProbe: {}
daemonset:
  # Deployment mode is used here (logs come from a PV, not node hostPaths)
  enabled: false
  hostPaths:
    containerLogs: /var/lib/docker/containers
    logs: /var/log
    machineId: /etc/machine-id
  podSecurityContext:
    enabled: false
    fsGroup: 0
    fsGroupChangePolicy: Always
    supplementalGroups: []
    sysctls: []
diagnosticMode:
  args:
    - infinity
  command:
    - sleep
  enabled: false
existingConfigMap: ''
extraDeploy: []
extraEnvVars: []
extraEnvVarsCM: ''
extraEnvVarsSecret: ''
extraVolumeMounts: []
extraVolumes: []
fullnameOverride: ''
global:
  compatibility:
    openshift:
      adaptSecurityContext: auto
  defaultStorageClass: ''
  imagePullSecrets: []
  imageRegistry: ''
  storageClass: 'nfs-client'  # storageClassName
hostAliases: []
hostNetwork: false
image:
  debug: false
  digest: ''
  pullPolicy: IfNotPresent
  pullSecrets: []
  registry: docker.io
  repository: bitnami/fluent-bit
  tag: 3.1.9-debian-12-r0
ingress:
  annotations: {}
  apiVersion: ''
  enabled: false
  extraHosts: []
  extraPaths: []
  extraRules: []
  extraTls: []
  hostname: fluent-bit.local
  ingressClassName: ''
  path: /
  pathType: ImplementationSpecific
  secrets: []
  selfSigned: false
  tls: false
initContainers: []
kubeVersion: ''
lifecycleHooks: {}
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
metrics:
  enabled: false
  serviceMonitor:
    annotations:
      prometheus.io/path: /metrics
      prometheus.io/port: '{{ .Values.service.ports.http }}'
      prometheus.io/scrape: 'true'
    enabled: false
    honorLabels: false
    interval: ''
    jobLabel: ''
    labels: {}
    metricRelabelings: []
    namespace: ''
    relabelings: []
    scrapeTimeout: ''
    selector: {}
nameOverride: ''
networkPolicy:
  allowExternal: true
  allowExternalEgress: true
  enabled: true
  extraEgress: []
  extraIngress: []
  ingressNSMatchLabels: {}
  ingressNSPodMatchLabels: {}
  kubeAPIServerPorts:
    - 443
    - 6443
    - 8443
nodeAffinityPreset:
  key: ''
  type: ''
  values: []
nodeSelector: {}
pdb:
  create: true
  # quoted: percentage values are strings, not numbers
  maxUnavailable: '50%'
  minAvailable: ''
podAffinityPreset: ''
podAnnotations: {}
podAntiAffinityPreset: soft
podLabels: {}
podSecurityContext:
  enabled: false
  fsGroupChangePolicy: Always
  supplementalGroups: []
  sysctls: []
priorityClassName: ''
rbac:
  create: false
  nodeAccess: false
  rules: []
readinessProbe:
  enabled: true
  failureThreshold: 15
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
replicaCount: 1
resources: {}
resourcesPreset: nano
schedulerName: ''
service:
  annotations: {}
  clusterIP: ''
  externalTrafficPolicy: Cluster
  extraPorts: []
  loadBalancerIP: ''
  loadBalancerSourceRanges: []
  nodePorts:
    http: ''
  ports:
    http: 2020
  sessionAffinity: None
  sessionAffinityConfig: {}
  type: ClusterIP
serviceAccount:
  annotations: {}
  automountServiceAccountToken: false
  create: false
  name: default
sidecars: []
startupProbe:
  enabled: true
  failureThreshold: 15
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
tolerations: []
topologySpreadConstraints: []
updateStrategy:
  rollingUpdate: {}
  type: RollingUpdate
이제 스프링부트 logback에서 설정한 fileAppender format에 맞춰 log를 파싱하여 field를 생성하고 opensearch에서 조회해보자.
728x90
'CloudNative > Observability & Analysis' 카테고리의 다른 글

| 제목 | 작성일 |
|---|---|
| kubecost (2) | 2024.11.11 |
| springboot log pipeline: EFK 로그 전처리와 opensearch (0) | 2024.11.07 |
| jib maven 으로 pinpoint agent 배포 (0) | 2024.10.18 |
| pinpoint server 2.5.4 k8s deploy (2) | 2024.10.18 |
| pinpoint server 2.5.4 YAML for k8s (0) | 2024.10.17 |