标签:beat srv inter forward 时间 stash alpine too kibana
# /etc/hosts entries for the three cluster nodes.
10.100.2.21 node1
10.100.2.22 node2
10.100.2.23 node3
# /etc/security/limits.conf entries: raise the open-file (nofile) and
# max-process (nproc) limits to 65536 for the zookeeper and kafka
# service accounts (both soft and hard limits).
zookeeper soft nofile 65536
zookeeper hard nofile 65536
zookeeper soft nproc 65536
zookeeper hard nproc 65536
kafka soft nofile 65536
kafka hard nofile 65536
kafka soft nproc 65536
kafka hard nproc 65536
# Create dedicated service accounts for kafka and zookeeper (run as root).
useradd kafka
useradd zookeeper
# Standard layout: binaries under /srv/app, logs under /srv/logs, data under /srv/data.
mkdir -p /srv/{app,logs,data}/{zookeeper,kafka}
chown -Rf kafka:kafka /srv/{app,logs,data}/kafka
chown -Rf zookeeper:zookeeper /srv/{app,logs,data}/zookeeper
# Append the zookeeper bin dir to PATH for login shells.
# \$PATH is escaped so the literal string "$PATH" is written into
# /etc/profile and expanded at login time; the original (unescaped,
# inside double quotes) expanded it at write time, freezing the
# current shell's PATH into the file.
echo -e "# append zk_env\nexport PATH=\$PATH:/srv/app/zookeeper/bin" >> /etc/profile
# ZooKeeper ensemble configuration (zoo.cfg). Base time unit is 2000 ms.
tickTime=2000
# Ticks allowed for a follower to initially connect and sync to the leader.
initLimit=10
# Ticks a follower may lag behind the leader before being dropped.
syncLimit=5
# Snapshot dir and transaction-log dir (match the /srv layout created above).
dataDir=/srv/data/zookeeper
dataLogDir=/srv/logs/zookeeper
clientPort=2181
# Autopurge: keep 500 snapshots, purge task runs every 24 hours.
# NOTE(review): 500 retained snapshots is far above the documented
# default of 3 — confirm the disk budget allows this.
autopurge.snapRetainCount=500
autopurge.purgeInterval=24
server.1=10.100.2.21:2888:3888
server.2=10.100.2.22:2888:3888
server.3=10.100.2.23:2888:3888
需要在 dataDir(/srv/data/zookeeper)下创建 myid 文件:node1 写入 1、node2 写入 2、node3 写入 3,与上面 server.N 的编号一一对应
/etc/init.d/zookeeper(SysV 启动脚本,内容如下):
#!/bin/bash
#chkconfig:2345 20 90
#description:zookeeper
#processname:zookeeper
# SysV init wrapper: delegates start/stop/status/restart to zkServer.sh,
# running it as ${ZKUSER} via su. Exits non-zero on unknown actions so
# init tooling can detect misuse.
export JAVA_HOME=/srv/app/tools/java/jdk1.8.0_181
ZKUSER="root"
ZKHOME="/srv/app/zookeeper"
# Quote "$1" so an empty/whitespace argument falls through to the
# usage branch instead of producing a syntax error.
case "$1" in
  start)   su ${ZKUSER} "${ZKHOME}/bin/zkServer.sh" start ;;
  stop)    su ${ZKUSER} "${ZKHOME}/bin/zkServer.sh" stop ;;
  status)  su ${ZKUSER} "${ZKHOME}/bin/zkServer.sh" status ;;
  restart) su ${ZKUSER} "${ZKHOME}/bin/zkServer.sh" restart ;;
  *) echo "require start|stop|status|restart"; exit 2 ;;
esac
# Re-own the zookeeper dirs to root because the init script above runs
# zkServer.sh as ZKUSER="root" — this overrides the earlier
# zookeeper:zookeeper chown.
chown -Rf root:root /srv/{app,data,logs}/zookeeper
; supervisord program section for the Kafka broker.
; Runs kafka-server-start.sh as user "kafka"; auto-starts with
; supervisord, restarts on failure (up to 3 retries), and is considered
; successfully started after 5 seconds of uptime. stderr is merged into
; stdout and written to a log rotated at 20 MB with 20 backups.
[program:kafka]
command = /srv/app/kafka/bin/kafka-server-start.sh /srv/app/kafka/config/server.properties
autostart = true
startsecs = 5
autorestart = true
startretries = 3
user = kafka
redirect_stderr = true
stdout_logfile_maxbytes = 20MB
stdout_logfile_backups = 20
stdout_logfile = /srv/logs/supervisor/kafka_super.log
# Pull the Alpine-based OpenResty (nginx + Lua) image.
docker pull openresty/openresty:alpine
#!/bin/bash
# Pre-fetch matching Elastic-stack images (Elasticsearch, Logstash,
# Kibana) for every version in the list.
elk_versions="6.4.2 6.7.0 7.2.0 7.4.0 7.4.2 7.5.0"
for version in ${elk_versions}; do
  echo ">>>>>> ${version} >>>>>>>"
  for image in elasticsearch logstash kibana; do
    docker pull "${image}:${version}"
  done
  # Filebeat's official image is published as
  # docker.elastic.co/beats/filebeat, not "filebeat" on Docker Hub;
  # left disabled as in the original.
  #docker pull filebeat:${version};
done
# Explicit re-pulls of selected Elasticsearch versions.
# The original repeated these four identical commands twice; the
# duplicates were removed — `docker pull` is idempotent, so behavior
# is unchanged.
docker pull elasticsearch:7.4.0;
docker pull elasticsearch:6.4.2;
docker pull elasticsearch:6.7.0;
docker pull elasticsearch:7.5.0;
#!/bin/bash
# (fixed: original shebang was "#bin/bash", which is just a comment and
# would run the script under whatever shell invoked it)
#20170926
# Host firewall: default-drop INPUT, explicitly allow management/SSH,
# web, DNS, NTP and ICMP; rate-limit fragments and ICMP on FORWARD.
# NOTE(review): "service iptables save/restart" and chkconfig are
# CentOS 6-era tooling; on systemd hosts use iptables-services — TODO
# confirm target OS.
iptables -F
iptables -X
iptables -P INPUT DROP #default policy: drop INPUT; the ACCEPT rules below open access
iptables -P OUTPUT ACCEPT #allow all outbound by default
iptables -P FORWARD ACCEPT #allow all forwarding by default
## Blocking a specific IP
# Examples:
# 1. Block 10.10.10.10 from reaching nginx: iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP
# 2. Block 10.10.10.10 completely (no access to any port): iptables -A INPUT -s 10.10.10.10 -j DROP
### block list ###
# iptables -A INPUT -s 10.10.10.10 -p tcp --dport 80 -j DROP
iptables -A INPUT -s 192.168.33.0/24 -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -s 192.168.35.0/24 -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -s 172.19.30.251 -p tcp --dport 22 -j ACCEPT #Jenkins auto-update of virus-definition files
iptables -A INPUT -s 192.168.35.11 -j ACCEPT
iptables -A INPUT -s 100.100.100.225 -p tcp --sport 7000 -j ACCEPT #ELK: filebeat connection to the redis port
iptables -A INPUT -m multiport -p tcp --dport 80,443 -j ACCEPT #allow dport 80,443 — external requests to nginx
iptables -A INPUT -m multiport -p tcp --sport 80,443 -j ACCEPT #allow sport 80,443 — access to yum repositories
iptables -A INPUT -p udp --sport 53 -j ACCEPT #allow local DNS resolution
iptables -A INPUT -m multiport -p udp --sport 123,323 -j ACCEPT #allow local NTP time sync
iptables -A INPUT -p icmp -j ACCEPT #allow ping
iptables -A INPUT -i lo -p all -j ACCEPT #allow loopback traffic
iptables -A FORWARD -f -m limit --limit 100/s --limit-burst 100 -j ACCEPT #limit IP fragments to 100/s to mitigate fragment attacks
iptables -A FORWARD -p icmp -m limit --limit 1/s --limit-burst 10 -j ACCEPT #ICMP filter: 1 packet/s, burst trigger at 10 packets
iptables -A FORWARD -m state --state INVALID -j DROP #drop invalid forwarded packets
service iptables save
service iptables restart
chkconfig iptables on
iptables -nv -L
标签:beat srv inter forward 时间 stash alpine too kibana
原文地址:https://www.cnblogs.com/lizhaojun-ops/p/12109967.html