# Deployment memo: OpenTelemetry Collector YAML manifests (部署 OTEL yaml 文件备忘)
---
# ConfigMap holding the collector configuration consumed by the otel-agent
# DaemonSet (mounted at /conf/otel-collector-config.yaml).
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: opentelemetry-operator-system
  name: otel-collector-conf
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      # Scrapes the collector's own telemetry metrics.
      prometheus:
        config:
          scrape_configs:
            - job_name: opentelemetry-collector
              scrape_interval: 10s
              static_configs:
                - targets:
                    - ${env:MY_POD_IP}:8888
      jaeger:
        protocols:
          grpc:
            # All endpoints use the ${env:...} substitution form consistently;
            # the DaemonSet must export an env var named MY_POD_IP.
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
    processors:
      # batch with collector defaults; {} makes the empty config explicit
      # (a bare key would parse as null).
      batch: {}
      memory_limiter:
        # 80% of maximum memory up to 2G
        limit_mib: 1500
        # 25% of limit up to 2G
        spike_limit_mib: 512
        check_interval: 5s
    extensions:
      zpages: {}
      memory_ballast:
        # Memory Ballast size should be max 1/3 to 1/2 of memory.
        size_mib: 683
    exporters:
      logging:
        loglevel: warn
      otlphttp/elastic:
        endpoint: "http://elk.rakour.com:8200"
        headers:
          # NOTE(review): bearer token committed in plain text over plain HTTP —
          # move it to a Secret / env substitution and confirm TLS is intended.
          Authorization: "Bearer XQtc2VydmVyL3Rva2VuLTE2ODI1NjA0NDE3NzY6NXdmckZOQy1"
      jaeger:
        endpoint: jaeger-collector.jaeger.svc.cluster.local:14250
        tls:
          insecure: true
    service:
      extensions: [zpages, memory_ballast]
      pipelines:
        metrics:
          receivers:
            - otlp
            - prometheus
          processors:
            - memory_limiter
            - batch
          exporters:
            - logging
            - otlphttp/elastic
        logs:
          receivers:
            - otlp
          processors:
            - memory_limiter
            - batch
          exporters:
            - logging
            - otlphttp/elastic
        traces:
          receivers:
            - otlp
            - jaeger
          processors:
            - memory_limiter
            - batch
          exporters:
            - logging
            - otlphttp/elastic
            - jaeger
---
# Service exposing the collector's OTLP receiver ports and its metrics endpoint.
apiVersion: v1
kind: Service
metadata:
  # NOTE(review): no namespace is set here, unlike the ConfigMap/DaemonSet
  # (opentelemetry-operator-system) — confirm whether this Service is meant
  # to land in the default namespace, where its selector will match nothing.
  name: otel-collector
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
    - name: otlp-grpc  # Default endpoint for OpenTelemetry gRPC receiver.
      port: 4317
      protocol: TCP
      targetPort: 4317
    - name: otlp-http  # Default endpoint for OpenTelemetry HTTP receiver.
      port: 4318
      protocol: TCP
      targetPort: 4318
    - name: metrics  # Default endpoint for querying metrics.
      port: 8888
  selector:
    component: otel-collector
---
# DaemonSet running one collector agent per node, configured from the
# otel-collector-conf ConfigMap.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  namespace: opentelemetry-operator-system
  name: otel-agent
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector
  template:
    metadata:
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      serviceAccountName: otel-collector-account
      containers:
        - name: collector
          command:
            - "/otelcol-contrib"
            - "--config=/conf/otel-collector-config.yaml"
          image: otel/opentelemetry-collector-contrib:0.82.0
          resources:
            limits:
              cpu: 1
              memory: 2Gi
            requests:
              cpu: 200m
              memory: 400Mi
          ports:
            - containerPort: 4318  # default port for OpenTelemetry HTTP receiver.
              hostPort: 4318
            - containerPort: 4317  # default port for OpenTelemetry gRPC receiver.
              hostPort: 4317
            - containerPort: 8888  # Default endpoint for querying metrics.
          volumeMounts:
            - name: otel-agent-config-vol
              mountPath: /conf
            - name: varlogpods
              mountPath: /var/log/pods
              readOnly: true
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
          env:
            # Fix: the collector config references ${MY_POD_IP}/${env:MY_POD_IP},
            # but this variable was originally named POD_IP, so every receiver
            # endpoint failed to resolve. Renamed to match the config.
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            # The k8s.pod.ip is used to associate pods with k8sattributes.
            # It is useful to have in the Collector pod because receiver metrics
            # can also benefit from the tags.
            - name: OTEL_RESOURCE_ATTRIBUTES
              value: "k8s.pod.ip=$(MY_POD_IP)"
      volumes:
        # NOTE(review): this volume is declared but never mounted by the
        # container above — confirm it is still needed.
        - name: otlpgen
          hostPath:
            path: /otlpgen
        - name: otel-agent-config-vol
          configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
        # Mount nodes' log file locations.
        - name: varlogpods
          hostPath:
            path: /var/log/pods
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
# (removed stray forum-page text "发表回复" / "post a reply" — not part of the manifests)