本文最后更新于 134 天前,其中的信息可能已经过时;如有错误,请发送邮件至 big_fw@foxmail.com。
Elasticsearch—pss修改GC策略
-
创建es-pss的cm当做配置文件挂载到容器内部
---
# ConfigMap holding the custom jvm.options for the es-pss Elasticsearch
# deployment: heap raised to 8 GB and CMS replaced by G1GC (200 ms pause target).
# Mounted over /usr/share/elasticsearch/config/jvm.options via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: es-pss-jvm
data:
  jvm.options: |-
    ## JVM configuration

    ################################################################
    ## IMPORTANT: JVM heap size
    ################################################################
    ##
    ## You should always set the min and max JVM heap
    ## size to the same value. For example, to set
    ## the heap to 4 GB, set:
    ##
    ## -Xms4g
    ## -Xmx4g
    ##
    ## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
    ## for more information
    ##
    ################################################################

    # Xms represents the initial size of total heap space
    # Xmx represents the maximum size of total heap space
    -Xms8g
    -Xmx8g

    ################################################################
    ## Expert settings
    ################################################################
    ##
    ## All settings below this section are considered
    ## expert settings. Don't tamper with them unless
    ## you understand what you are doing
    ##
    ################################################################

    ## GC configuration
    #-XX:+UseConcMarkSweepGC
    #-XX:CMSInitiatingOccupancyFraction=75
    #-XX:+UseCMSInitiatingOccupancyOnly
    -XX:+UseG1GC
    -XX:MaxGCPauseMillis=200

    ## optimizations

    # pre-touch memory pages used by the JVM during initialization
    -XX:+AlwaysPreTouch

    ## basic

    # explicitly set the stack size
    -Xss1m

    # set to headless, just in case
    -Djava.awt.headless=true

    # ensure UTF-8 encoding by default (e.g. filenames)
    -Dfile.encoding=UTF-8

    # use our provided JNA always versus the system one
    -Djna.nosys=true

    # turn off a JDK optimization that throws away stack traces for common
    # exceptions because stack traces are important for debugging
    -XX:-OmitStackTraceInFastThrow

    # flags to configure Netty
    -Dio.netty.noUnsafe=true
    -Dio.netty.noKeySetOptimization=true
    -Dio.netty.recycler.maxCapacityPerThread=0

    # log4j 2
    -Dlog4j.shutdownHookEnabled=false
    -Dlog4j2.disable.jmx=true

    -Djava.io.tmpdir=${ES_TMPDIR}

    ## heap dumps

    # generate a heap dump when an allocation from the Java heap fails
    # heap dumps are created in the working directory of the JVM
    -XX:+HeapDumpOnOutOfMemoryError

    # specify an alternative path for heap dumps; ensure the directory exists and
    # has sufficient space
    -XX:HeapDumpPath=data

    # specify an alternative path for JVM fatal error logs
    -XX:ErrorFile=logs/hs_err_pid%p.log

    ## JDK 8 GC logging
    8:-XX:+PrintGCDetails
    8:-XX:+PrintGCDateStamps
    8:-XX:+PrintTenuringDistribution
    8:-XX:+PrintGCApplicationStoppedTime
    8:-Xloggc:logs/gc.log
    8:-XX:+UseGCLogFileRotation
    8:-XX:NumberOfGCLogFiles=32
    8:-XX:GCLogFileSize=64m

    # JDK 9+ GC logging
    9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
    # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise
    # time/date parsing will break in an incompatible way for some date patterns and locals
    9-:-Djava.locale.providers=COMPAT

    # temporary workaround for C2 bug with JDK 10 on hardware with AVX-512
    10-:-XX:UseAVX=2
# Apply the JVM-options ConfigMap to both the pro and test namespaces.
kubectl apply -f es-pss-jvm.yaml -n pro
kubectl apply -f es-pss-jvm.yaml -n test
- 停止svc的流量
# Drain traffic: point the Service selectors at labels no pod carries, so the
# Service's endpoints become empty and clients stop reaching the pods.
kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'
kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'
- 挂载到elasticsearch中
---
# Deployment for elasticsearch-pss. The es-pss-jvm ConfigMap is mounted over
# the container's jvm.options (subPath mount) so only that one file is replaced;
# a hostPath volume at /tmp/elasticsearch-pss receives heap dumps.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: elasticsearch-pss
    chart: elasticsearch-pss-1.16.3
    heritage: Tiller
    io.cattle.field/appId: elasticsearch-pss
    release: elasticsearch-pss
    velero.io/backup-name: test-backup
    velero.io/restore-name: test-backup-20230210174913
  name: elasticsearch-pss
  namespace: test
spec:
  progressDeadlineSeconds: 6000
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: elasticsearch-pss
      release: elasticsearch-pss
  strategy:
    # Recreate: the old pod is torn down before the new one starts
    # (presumably so the single PVC is never attached twice — confirm).
    type: Recreate
  template:
    metadata:
      labels:
        app: elasticsearch-pss
        release: elasticsearch-pss
    spec:
      containers:
        - env:
            # NOTE(review): ES_JAVA_OPTS sets a 4g heap while the mounted
            # jvm.options sets 8g — confirm which heap size is intended to win.
            - name: ES_JAVA_OPTS
              value: -Xms4g -Xmx4g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/elasticsearch-pss/elasticsearch-pss.hprof
          image: registry-hz.rubikstack.com/lichen/elasticsearch-ik-pinyin:6.4.3
          imagePullPolicy: IfNotPresent
          name: elasticsearch-pss
          ports:
            - containerPort: 9200
              name: http
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          resources:
            limits:
              memory: 8Gi
            requests:
              memory: 2Gi
          # NOTE(review): non-standard path (the default is /dev/termination-log)
          # — confirm this is deliberate.
          terminationMessagePath: /test/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            # Overlay only jvm.options; the rest of the config dir stays intact.
            - mountPath: /usr/share/elasticsearch/config/jvm.options
              name: es-jvm
              subPath: jvm.options
            - mountPath: /usr/share/elasticsearch/data
              name: data
            # Heap-dump target referenced by -XX:HeapDumpPath in ES_JAVA_OPTS.
            - mountPath: /tmp/elasticsearch-pss
              name: vol1
      dnsPolicy: ClusterFirst
      initContainers:
        # Raise vm.max_map_count on the host before Elasticsearch starts.
        - command:
            - sysctl
            - -w
            - vm.max_map_count=262144
          image: registry-hz.rubikstack.com/library/job:1115
          imagePullPolicy: IfNotPresent
          name: configure-sysctl
          resources: {}
          securityContext:
            privileged: true
            runAsUser: 0
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 1000
      terminationGracePeriodSeconds: 30
      volumes:
        - configMap:
            defaultMode: 420
            name: es-pss-jvm
          name: es-jvm
        - name: data
          persistentVolumeClaim:
            claimName: elasticsearch-pss
        - hostPath:
            path: /tmp/elasticsearch-pss
            type: ""
          name: vol1
- 容器启动后恢复 svc 流量
# Restore the original selectors so traffic reaches the es-pss pods again.
kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
- 在 master 节点设置 crontab:每月 3 号和 18 号自动重启 es-pss 的 Pod,释放内存
# At 03:00 on the 3rd and 18th of every month, restart the es-pss pods.
0 3 3,18 * * /usr/scripts/delete-es-pss-pod.sh
#!/bin/bash
# Drain traffic from the es-pss Services, delete the pods so the Deployment
# recreates them (releasing memory), then restore the Service selectors.
set -u

# Drain traffic: point the selectors at labels no pod carries.
# NOTE(review): the original script used namespace "ptest" here while every
# other command in this runbook uses "test"; treated as a typo and corrected
# to "test" — confirm.
kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'
kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'

# Wait 60 s for in-flight requests to drain, then delete the pods.
sleep 60
kubectl delete pod -n test -l app=elasticsearch-pss
kubectl delete pod -n pro -l app=elasticsearch-pss

# Give the replacement pods two minutes to come up.
sleep 120

# Restore the original selectors so traffic flows again.
kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
#!/bin/bash
# Restart the es-pss pods only when the node currently hosting the pro pod is
# schedulable, so the replacement pod can be scheduled after deletion.
set -u

# Node currently running the elasticsearch-pss pod in namespace pro.
node=$(kubectl get pod -l app=elasticsearch-pss -o jsonpath='{.items[0].spec.nodeName}' -n pro)
if [[ -z "$node" ]]; then
  # No pod found — nothing to restart; bail out instead of querying an empty node name.
  exit 1
fi

# .spec.unschedulable is "true" on a cordoned node and empty otherwise.
unschedulable=$(kubectl get node "$node" -o jsonpath='{.spec.unschedulable}')

if [[ "$unschedulable" == "true" ]]; then
  echo "节点 $node 不可调度,不执行命令。"
else
  echo "节点 $node 可调度,执行命令。"
  # Drain traffic: point the selectors at labels no pod carries.
  # NOTE(review): the original script used namespace "ptest" here while every
  # other command in this runbook uses "test"; treated as a typo and corrected
  # to "test" — confirm.
  kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'
  kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss-nonexistent-label", "release": "elasticsearch-pss-nonexistent-label"}}}'
  # Wait 60 s for in-flight requests to drain, then delete the pods.
  sleep 60
  kubectl delete pod -n test -l app=elasticsearch-pss
  kubectl delete pod -n pro -l app=elasticsearch-pss
  # Give the replacement pods two minutes to come up.
  sleep 120
  # Restore the original selectors so traffic flows again.
  kubectl patch -n test svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
  kubectl patch -n pro svc elasticsearch-pss -p '{"spec":{"selector": {"app": "elasticsearch-pss", "release": "elasticsearch-pss"}}}'
fi